2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2011 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
48 #include <net/checksum.h>
51 #include <asm/system.h>
53 #include <asm/byteorder.h>
54 #include <linux/uaccess.h>
57 #include <asm/idprom.h>
66 /* Functions & macros to verify TG3_FLAGS types */
68 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
70 return test_bit(flag, bits);
73 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
78 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
80 clear_bit(flag, bits);
83 #define tg3_flag(tp, flag) \
84 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
85 #define tg3_flag_set(tp, flag) \
86 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
87 #define tg3_flag_clear(tp, flag) \
88 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
90 #define DRV_MODULE_NAME "tg3"
92 #define TG3_MIN_NUM 119
93 #define DRV_MODULE_VERSION \
94 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
95 #define DRV_MODULE_RELDATE "May 18, 2011"
97 #define TG3_DEF_MAC_MODE 0
98 #define TG3_DEF_RX_MODE 0
99 #define TG3_DEF_TX_MODE 0
100 #define TG3_DEF_MSG_ENABLE \
110 /* length of time before we decide the hardware is borked,
111 * and dev->tx_timeout() should be called to fix the problem
114 #define TG3_TX_TIMEOUT (5 * HZ)
116 /* hardware minimum and maximum for a single frame's data payload */
117 #define TG3_MIN_MTU 60
118 #define TG3_MAX_MTU(tp) \
119 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
121 /* These numbers seem to be hard coded in the NIC firmware somehow.
122 * You can't change the ring sizes, but you can change where you place
123 * them in the NIC onboard memory.
125 #define TG3_RX_STD_RING_SIZE(tp) \
126 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
127 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
128 #define TG3_DEF_RX_RING_PENDING 200
129 #define TG3_RX_JMB_RING_SIZE(tp) \
130 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
131 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
132 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
133 #define TG3_RSS_INDIR_TBL_SIZE 128
135 /* Do not place this n-ring entries value into the tp struct itself,
136 * we really want to expose these constants to GCC so that modulo et
137 * al. operations are done with shifts and masks instead of with
138 * hw multiply/modulo instructions. Another solution would be to
139 * replace things like '% foo' with '& (foo - 1)'.
142 #define TG3_TX_RING_SIZE 512
143 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
145 #define TG3_RX_STD_RING_BYTES(tp) \
146 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
147 #define TG3_RX_JMB_RING_BYTES(tp) \
148 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
149 #define TG3_RX_RCB_RING_BYTES(tp) \
150 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
151 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
153 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
155 #define TG3_DMA_BYTE_ENAB 64
157 #define TG3_RX_STD_DMA_SZ 1536
158 #define TG3_RX_JMB_DMA_SZ 9046
160 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
162 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
163 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
165 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
166 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
168 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
169 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
171 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
172 * that are at least dword aligned when used in PCIX mode. The driver
173 * works around this bug by double copying the packet. This workaround
174 * is built into the normal double copy length check for efficiency.
176 * However, the double copy is only necessary on those architectures
177 * where unaligned memory accesses are inefficient. For those architectures
178 * where unaligned memory accesses incur little penalty, we can reintegrate
179 * the 5701 in the normal rx path. Doing so saves a device structure
180 * dereference by hardcoding the double copy threshold in place.
182 #define TG3_RX_COPY_THRESHOLD 256
183 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
184 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
186 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
189 /* minimum number of free TX descriptors required to wake up TX process */
190 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
192 #define TG3_RAW_IP_ALIGN 2
194 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
196 #define FIRMWARE_TG3 "tigon/tg3.bin"
197 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
198 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
200 static char version[] __devinitdata =
201 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
203 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
204 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
205 MODULE_LICENSE("GPL");
206 MODULE_VERSION(DRV_MODULE_VERSION);
207 MODULE_FIRMWARE(FIRMWARE_TG3);
208 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
209 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
211 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
212 module_param(tg3_debug, int, 0);
213 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
215 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
216 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
217 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
218 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
219 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
220 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
221 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
222 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
223 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
224 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
225 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
226 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
227 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
228 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
229 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
230 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
231 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
232 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
233 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
234 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
260 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
278 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
286 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
288 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
289 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
290 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
291 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
292 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
293 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
294 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
295 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
296 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
300 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
302 static const struct {
303 const char string[ETH_GSTRING_LEN];
304 } ethtool_stats_keys[] = {
307 { "rx_ucast_packets" },
308 { "rx_mcast_packets" },
309 { "rx_bcast_packets" },
311 { "rx_align_errors" },
312 { "rx_xon_pause_rcvd" },
313 { "rx_xoff_pause_rcvd" },
314 { "rx_mac_ctrl_rcvd" },
315 { "rx_xoff_entered" },
316 { "rx_frame_too_long_errors" },
318 { "rx_undersize_packets" },
319 { "rx_in_length_errors" },
320 { "rx_out_length_errors" },
321 { "rx_64_or_less_octet_packets" },
322 { "rx_65_to_127_octet_packets" },
323 { "rx_128_to_255_octet_packets" },
324 { "rx_256_to_511_octet_packets" },
325 { "rx_512_to_1023_octet_packets" },
326 { "rx_1024_to_1522_octet_packets" },
327 { "rx_1523_to_2047_octet_packets" },
328 { "rx_2048_to_4095_octet_packets" },
329 { "rx_4096_to_8191_octet_packets" },
330 { "rx_8192_to_9022_octet_packets" },
337 { "tx_flow_control" },
339 { "tx_single_collisions" },
340 { "tx_mult_collisions" },
342 { "tx_excessive_collisions" },
343 { "tx_late_collisions" },
344 { "tx_collide_2times" },
345 { "tx_collide_3times" },
346 { "tx_collide_4times" },
347 { "tx_collide_5times" },
348 { "tx_collide_6times" },
349 { "tx_collide_7times" },
350 { "tx_collide_8times" },
351 { "tx_collide_9times" },
352 { "tx_collide_10times" },
353 { "tx_collide_11times" },
354 { "tx_collide_12times" },
355 { "tx_collide_13times" },
356 { "tx_collide_14times" },
357 { "tx_collide_15times" },
358 { "tx_ucast_packets" },
359 { "tx_mcast_packets" },
360 { "tx_bcast_packets" },
361 { "tx_carrier_sense_errors" },
365 { "dma_writeq_full" },
366 { "dma_write_prioq_full" },
370 { "rx_threshold_hit" },
372 { "dma_readq_full" },
373 { "dma_read_prioq_full" },
374 { "tx_comp_queue_full" },
376 { "ring_set_send_prod_index" },
377 { "ring_status_update" },
379 { "nic_avoided_irqs" },
380 { "nic_tx_threshold_hit" },
382 { "mbuf_lwm_thresh_hit" },
385 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
388 static const struct {
389 const char string[ETH_GSTRING_LEN];
390 } ethtool_test_keys[] = {
391 { "nvram test (online) " },
392 { "link test (online) " },
393 { "register test (offline)" },
394 { "memory test (offline)" },
395 { "loopback test (offline)" },
396 { "interrupt test (offline)" },
399 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
402 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
404 writel(val, tp->regs + off);
407 static u32 tg3_read32(struct tg3 *tp, u32 off)
409 return readl(tp->regs + off);
412 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
414 writel(val, tp->aperegs + off);
417 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
419 return readl(tp->aperegs + off);
422 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
426 spin_lock_irqsave(&tp->indirect_lock, flags);
427 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
428 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
429 spin_unlock_irqrestore(&tp->indirect_lock, flags);
432 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
434 writel(val, tp->regs + off);
435 readl(tp->regs + off);
438 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
443 spin_lock_irqsave(&tp->indirect_lock, flags);
444 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
445 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
446 spin_unlock_irqrestore(&tp->indirect_lock, flags);
450 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
454 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
455 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
456 TG3_64BIT_REG_LOW, val);
459 if (off == TG3_RX_STD_PROD_IDX_REG) {
460 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
461 TG3_64BIT_REG_LOW, val);
465 spin_lock_irqsave(&tp->indirect_lock, flags);
466 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
467 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
468 spin_unlock_irqrestore(&tp->indirect_lock, flags);
470 /* In indirect mode when disabling interrupts, we also need
471 * to clear the interrupt bit in the GRC local ctrl register.
473 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
475 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
476 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
480 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
485 spin_lock_irqsave(&tp->indirect_lock, flags);
486 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
487 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
488 spin_unlock_irqrestore(&tp->indirect_lock, flags);
492 /* usec_wait specifies the wait time in usec when writing to certain registers
493 * where it is unsafe to read back the register without some delay.
494 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
495 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
497 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
499 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
500 /* Non-posted methods */
501 tp->write32(tp, off, val);
504 tg3_write32(tp, off, val);
509 /* Wait again after the read for the posted method to guarantee that
510 * the wait time is met.
516 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
518 tp->write32_mbox(tp, off, val);
519 if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
520 tp->read32_mbox(tp, off);
523 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
525 void __iomem *mbox = tp->regs + off;
527 if (tg3_flag(tp, TXD_MBOX_HWBUG))
529 if (tg3_flag(tp, MBOX_WRITE_REORDER))
533 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
535 return readl(tp->regs + off + GRCMBOX_BASE);
538 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
540 writel(val, tp->regs + off + GRCMBOX_BASE);
543 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
544 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
545 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
546 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
547 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
549 #define tw32(reg, val) tp->write32(tp, reg, val)
550 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
551 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
552 #define tr32(reg) tp->read32(tp, reg)
554 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
558 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
559 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
562 spin_lock_irqsave(&tp->indirect_lock, flags);
563 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
564 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
565 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
567 /* Always leave this as zero. */
568 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
570 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
571 tw32_f(TG3PCI_MEM_WIN_DATA, val);
573 /* Always leave this as zero. */
574 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
576 spin_unlock_irqrestore(&tp->indirect_lock, flags);
579 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
583 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
584 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
589 spin_lock_irqsave(&tp->indirect_lock, flags);
590 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
591 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
592 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
594 /* Always leave this as zero. */
595 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
597 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
598 *val = tr32(TG3PCI_MEM_WIN_DATA);
600 /* Always leave this as zero. */
601 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
603 spin_unlock_irqrestore(&tp->indirect_lock, flags);
606 static void tg3_ape_lock_init(struct tg3 *tp)
611 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
612 regbase = TG3_APE_LOCK_GRANT;
614 regbase = TG3_APE_PER_LOCK_GRANT;
616 /* Make sure the driver hasn't any stale locks. */
617 for (i = 0; i < 8; i++)
618 tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
621 static int tg3_ape_lock(struct tg3 *tp, int locknum)
625 u32 status, req, gnt;
627 if (!tg3_flag(tp, ENABLE_APE))
631 case TG3_APE_LOCK_GRC:
632 case TG3_APE_LOCK_MEM:
638 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
639 req = TG3_APE_LOCK_REQ;
640 gnt = TG3_APE_LOCK_GRANT;
642 req = TG3_APE_PER_LOCK_REQ;
643 gnt = TG3_APE_PER_LOCK_GRANT;
648 tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);
650 /* Wait for up to 1 millisecond to acquire lock. */
651 for (i = 0; i < 100; i++) {
652 status = tg3_ape_read32(tp, gnt + off);
653 if (status == APE_LOCK_GRANT_DRIVER)
658 if (status != APE_LOCK_GRANT_DRIVER) {
659 /* Revoke the lock request. */
660 tg3_ape_write32(tp, gnt + off,
661 APE_LOCK_GRANT_DRIVER);
669 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
673 if (!tg3_flag(tp, ENABLE_APE))
677 case TG3_APE_LOCK_GRC:
678 case TG3_APE_LOCK_MEM:
684 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
685 gnt = TG3_APE_LOCK_GRANT;
687 gnt = TG3_APE_PER_LOCK_GRANT;
689 tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
692 static void tg3_disable_ints(struct tg3 *tp)
696 tw32(TG3PCI_MISC_HOST_CTRL,
697 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
698 for (i = 0; i < tp->irq_max; i++)
699 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
702 static void tg3_enable_ints(struct tg3 *tp)
709 tw32(TG3PCI_MISC_HOST_CTRL,
710 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
712 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
713 for (i = 0; i < tp->irq_cnt; i++) {
714 struct tg3_napi *tnapi = &tp->napi[i];
716 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
717 if (tg3_flag(tp, 1SHOT_MSI))
718 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
720 tp->coal_now |= tnapi->coal_now;
723 /* Force an initial interrupt */
724 if (!tg3_flag(tp, TAGGED_STATUS) &&
725 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
726 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
728 tw32(HOSTCC_MODE, tp->coal_now);
730 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
733 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
735 struct tg3 *tp = tnapi->tp;
736 struct tg3_hw_status *sblk = tnapi->hw_status;
737 unsigned int work_exists = 0;
739 /* check for phy events */
740 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
741 if (sblk->status & SD_STATUS_LINK_CHG)
744 /* check for RX/TX work to do */
745 if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
746 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
753 * similar to tg3_enable_ints, but it accurately determines whether there
754 * is new work pending and can return without flushing the PIO write
755 * which reenables interrupts
757 static void tg3_int_reenable(struct tg3_napi *tnapi)
759 struct tg3 *tp = tnapi->tp;
761 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
764 /* When doing tagged status, this work check is unnecessary.
765 * The last_tag we write above tells the chip which piece of
766 * work we've completed.
768 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
769 tw32(HOSTCC_MODE, tp->coalesce_mode |
770 HOSTCC_MODE_ENABLE | tnapi->coal_now);
773 static void tg3_switch_clocks(struct tg3 *tp)
778 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
781 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
783 orig_clock_ctrl = clock_ctrl;
784 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
785 CLOCK_CTRL_CLKRUN_OENABLE |
787 tp->pci_clock_ctrl = clock_ctrl;
789 if (tg3_flag(tp, 5705_PLUS)) {
790 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
791 tw32_wait_f(TG3PCI_CLOCK_CTRL,
792 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
794 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
795 tw32_wait_f(TG3PCI_CLOCK_CTRL,
797 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
799 tw32_wait_f(TG3PCI_CLOCK_CTRL,
800 clock_ctrl | (CLOCK_CTRL_ALTCLK),
803 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
806 #define PHY_BUSY_LOOPS 5000
808 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
814 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
816 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
822 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
823 MI_COM_PHY_ADDR_MASK);
824 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
825 MI_COM_REG_ADDR_MASK);
826 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
828 tw32_f(MAC_MI_COM, frame_val);
830 loops = PHY_BUSY_LOOPS;
833 frame_val = tr32(MAC_MI_COM);
835 if ((frame_val & MI_COM_BUSY) == 0) {
837 frame_val = tr32(MAC_MI_COM);
845 *val = frame_val & MI_COM_DATA_MASK;
849 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
850 tw32_f(MAC_MI_MODE, tp->mi_mode);
857 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
863 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
864 (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
867 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
869 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
873 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
874 MI_COM_PHY_ADDR_MASK);
875 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
876 MI_COM_REG_ADDR_MASK);
877 frame_val |= (val & MI_COM_DATA_MASK);
878 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
880 tw32_f(MAC_MI_COM, frame_val);
882 loops = PHY_BUSY_LOOPS;
885 frame_val = tr32(MAC_MI_COM);
886 if ((frame_val & MI_COM_BUSY) == 0) {
888 frame_val = tr32(MAC_MI_COM);
898 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
899 tw32_f(MAC_MI_MODE, tp->mi_mode);
906 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
910 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
914 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
918 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
919 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
923 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
929 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
933 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
937 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
941 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
942 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
946 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
952 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
956 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
958 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
963 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
967 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
969 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
974 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
978 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
979 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
980 MII_TG3_AUXCTL_SHDWSEL_MISC);
982 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
987 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
989 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
990 set |= MII_TG3_AUXCTL_MISC_WREN;
992 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
995 #define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
996 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
997 MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
998 MII_TG3_AUXCTL_ACTL_TX_6DB)
1000 #define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
1001 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1002 MII_TG3_AUXCTL_ACTL_TX_6DB);
1004 static int tg3_bmcr_reset(struct tg3 *tp)
1009 /* OK, reset it, and poll the BMCR_RESET bit until it
1010 * clears or we time out.
1012 phy_control = BMCR_RESET;
1013 err = tg3_writephy(tp, MII_BMCR, phy_control);
1019 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1023 if ((phy_control & BMCR_RESET) == 0) {
1035 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1037 struct tg3 *tp = bp->priv;
1040 spin_lock_bh(&tp->lock);
1042 if (tg3_readphy(tp, reg, &val))
1045 spin_unlock_bh(&tp->lock);
1050 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1052 struct tg3 *tp = bp->priv;
1055 spin_lock_bh(&tp->lock);
1057 if (tg3_writephy(tp, reg, val))
1060 spin_unlock_bh(&tp->lock);
1065 static int tg3_mdio_reset(struct mii_bus *bp)
1070 static void tg3_mdio_config_5785(struct tg3 *tp)
1073 struct phy_device *phydev;
1075 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1076 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1077 case PHY_ID_BCM50610:
1078 case PHY_ID_BCM50610M:
1079 val = MAC_PHYCFG2_50610_LED_MODES;
1081 case PHY_ID_BCMAC131:
1082 val = MAC_PHYCFG2_AC131_LED_MODES;
1084 case PHY_ID_RTL8211C:
1085 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1087 case PHY_ID_RTL8201E:
1088 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1094 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1095 tw32(MAC_PHYCFG2, val);
1097 val = tr32(MAC_PHYCFG1);
1098 val &= ~(MAC_PHYCFG1_RGMII_INT |
1099 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1100 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1101 tw32(MAC_PHYCFG1, val);
1106 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1107 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1108 MAC_PHYCFG2_FMODE_MASK_MASK |
1109 MAC_PHYCFG2_GMODE_MASK_MASK |
1110 MAC_PHYCFG2_ACT_MASK_MASK |
1111 MAC_PHYCFG2_QUAL_MASK_MASK |
1112 MAC_PHYCFG2_INBAND_ENABLE;
1114 tw32(MAC_PHYCFG2, val);
1116 val = tr32(MAC_PHYCFG1);
1117 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1118 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1119 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1120 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1121 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1122 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1123 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1125 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1126 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1127 tw32(MAC_PHYCFG1, val);
1129 val = tr32(MAC_EXT_RGMII_MODE);
1130 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1131 MAC_RGMII_MODE_RX_QUALITY |
1132 MAC_RGMII_MODE_RX_ACTIVITY |
1133 MAC_RGMII_MODE_RX_ENG_DET |
1134 MAC_RGMII_MODE_TX_ENABLE |
1135 MAC_RGMII_MODE_TX_LOWPWR |
1136 MAC_RGMII_MODE_TX_RESET);
1137 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1138 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1139 val |= MAC_RGMII_MODE_RX_INT_B |
1140 MAC_RGMII_MODE_RX_QUALITY |
1141 MAC_RGMII_MODE_RX_ACTIVITY |
1142 MAC_RGMII_MODE_RX_ENG_DET;
1143 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1144 val |= MAC_RGMII_MODE_TX_ENABLE |
1145 MAC_RGMII_MODE_TX_LOWPWR |
1146 MAC_RGMII_MODE_TX_RESET;
1148 tw32(MAC_EXT_RGMII_MODE, val);
1151 static void tg3_mdio_start(struct tg3 *tp)
1153 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1154 tw32_f(MAC_MI_MODE, tp->mi_mode);
1157 if (tg3_flag(tp, MDIOBUS_INITED) &&
1158 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1159 tg3_mdio_config_5785(tp);
1162 static int tg3_mdio_init(struct tg3 *tp)
1166 struct phy_device *phydev;
1168 if (tg3_flag(tp, 5717_PLUS)) {
1171 tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;
1173 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1174 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1176 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1177 TG3_CPMU_PHY_STRAP_IS_SERDES;
1181 tp->phy_addr = TG3_PHY_MII_ADDR;
1185 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1188 tp->mdio_bus = mdiobus_alloc();
1189 if (tp->mdio_bus == NULL)
1192 tp->mdio_bus->name = "tg3 mdio bus";
1193 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1194 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1195 tp->mdio_bus->priv = tp;
1196 tp->mdio_bus->parent = &tp->pdev->dev;
1197 tp->mdio_bus->read = &tg3_mdio_read;
1198 tp->mdio_bus->write = &tg3_mdio_write;
1199 tp->mdio_bus->reset = &tg3_mdio_reset;
1200 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1201 tp->mdio_bus->irq = &tp->mdio_irq[0];
1203 for (i = 0; i < PHY_MAX_ADDR; i++)
1204 tp->mdio_bus->irq[i] = PHY_POLL;
1206 /* The bus registration will look for all the PHYs on the mdio bus.
1207 * Unfortunately, it does not ensure the PHY is powered up before
1208 * accessing the PHY ID registers. A chip reset is the
1209 * quickest way to bring the device back to an operational state..
1211 if (tg3_readphy(tp, MII_BMCR, ®) || (reg & BMCR_PDOWN))
1214 i = mdiobus_register(tp->mdio_bus);
1216 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1217 mdiobus_free(tp->mdio_bus);
1221 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1223 if (!phydev || !phydev->drv) {
1224 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1225 mdiobus_unregister(tp->mdio_bus);
1226 mdiobus_free(tp->mdio_bus);
1230 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1231 case PHY_ID_BCM57780:
1232 phydev->interface = PHY_INTERFACE_MODE_GMII;
1233 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1235 case PHY_ID_BCM50610:
1236 case PHY_ID_BCM50610M:
1237 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1238 PHY_BRCM_RX_REFCLK_UNUSED |
1239 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1240 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1241 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1242 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1243 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1244 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1245 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1246 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1248 case PHY_ID_RTL8211C:
1249 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1251 case PHY_ID_RTL8201E:
1252 case PHY_ID_BCMAC131:
1253 phydev->interface = PHY_INTERFACE_MODE_MII;
1254 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1255 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1259 tg3_flag_set(tp, MDIOBUS_INITED);
1261 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1262 tg3_mdio_config_5785(tp);
1267 static void tg3_mdio_fini(struct tg3 *tp)
1269 if (tg3_flag(tp, MDIOBUS_INITED)) {
1270 tg3_flag_clear(tp, MDIOBUS_INITED);
1271 mdiobus_unregister(tp->mdio_bus);
1272 mdiobus_free(tp->mdio_bus);
1276 /* tp->lock is held. */
1277 static inline void tg3_generate_fw_event(struct tg3 *tp)
1281 val = tr32(GRC_RX_CPU_EVENT);
1282 val |= GRC_RX_CPU_DRIVER_EVENT;
1283 tw32_f(GRC_RX_CPU_EVENT, val);
1285 tp->last_event_jiffies = jiffies;
1288 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1290 /* tp->lock is held. */
1291 static void tg3_wait_for_event_ack(struct tg3 *tp)
1294 unsigned int delay_cnt;
1297 /* If enough time has passed, no wait is necessary. */
1298 time_remain = (long)(tp->last_event_jiffies + 1 +
1299 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1301 if (time_remain < 0)
1304 /* Check if we can shorten the wait time. */
1305 delay_cnt = jiffies_to_usecs(time_remain);
1306 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1307 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1308 delay_cnt = (delay_cnt >> 3) + 1;
1310 for (i = 0; i < delay_cnt; i++) {
1311 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1317 /* tp->lock is held. */
1318 static void tg3_ump_link_report(struct tg3 *tp)
1323 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1326 tg3_wait_for_event_ack(tp);
1328 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1330 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1333 if (!tg3_readphy(tp, MII_BMCR, ®))
1335 if (!tg3_readphy(tp, MII_BMSR, ®))
1336 val |= (reg & 0xffff);
1337 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1340 if (!tg3_readphy(tp, MII_ADVERTISE, ®))
1342 if (!tg3_readphy(tp, MII_LPA, ®))
1343 val |= (reg & 0xffff);
1344 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1347 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1348 if (!tg3_readphy(tp, MII_CTRL1000, ®))
1350 if (!tg3_readphy(tp, MII_STAT1000, ®))
1351 val |= (reg & 0xffff);
1353 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1355 if (!tg3_readphy(tp, MII_PHYADDR, ®))
1359 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1361 tg3_generate_fw_event(tp);
1364 static void tg3_link_report(struct tg3 *tp)
1366 if (!netif_carrier_ok(tp->dev)) {
1367 netif_info(tp, link, tp->dev, "Link is down\n");
1368 tg3_ump_link_report(tp);
1369 } else if (netif_msg_link(tp)) {
1370 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1371 (tp->link_config.active_speed == SPEED_1000 ?
1373 (tp->link_config.active_speed == SPEED_100 ?
1375 (tp->link_config.active_duplex == DUPLEX_FULL ?
1378 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1379 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1381 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1384 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1385 netdev_info(tp->dev, "EEE is %s\n",
1386 tp->setlpicnt ? "enabled" : "disabled");
1388 tg3_ump_link_report(tp);
1392 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1396 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1397 miireg = ADVERTISE_PAUSE_CAP;
1398 else if (flow_ctrl & FLOW_CTRL_TX)
1399 miireg = ADVERTISE_PAUSE_ASYM;
1400 else if (flow_ctrl & FLOW_CTRL_RX)
1401 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1408 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1412 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1413 miireg = ADVERTISE_1000XPAUSE;
1414 else if (flow_ctrl & FLOW_CTRL_TX)
1415 miireg = ADVERTISE_1000XPSE_ASYM;
1416 else if (flow_ctrl & FLOW_CTRL_RX)
1417 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1424 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1428 if (lcladv & ADVERTISE_1000XPAUSE) {
1429 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1430 if (rmtadv & LPA_1000XPAUSE)
1431 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1432 else if (rmtadv & LPA_1000XPAUSE_ASYM)
1435 if (rmtadv & LPA_1000XPAUSE)
1436 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1438 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1439 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1446 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1450 u32 old_rx_mode = tp->rx_mode;
1451 u32 old_tx_mode = tp->tx_mode;
1453 if (tg3_flag(tp, USE_PHYLIB))
1454 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1456 autoneg = tp->link_config.autoneg;
1458 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1459 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1460 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1462 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1464 flowctrl = tp->link_config.flowctrl;
1466 tp->link_config.active_flowctrl = flowctrl;
1468 if (flowctrl & FLOW_CTRL_RX)
1469 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1471 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1473 if (old_rx_mode != tp->rx_mode)
1474 tw32_f(MAC_RX_MODE, tp->rx_mode);
1476 if (flowctrl & FLOW_CTRL_TX)
1477 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1479 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1481 if (old_tx_mode != tp->tx_mode)
1482 tw32_f(MAC_TX_MODE, tp->tx_mode);
1485 static void tg3_adjust_link(struct net_device *dev)
1487 u8 oldflowctrl, linkmesg = 0;
1488 u32 mac_mode, lcl_adv, rmt_adv;
1489 struct tg3 *tp = netdev_priv(dev);
1490 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1492 spin_lock_bh(&tp->lock);
1494 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1495 MAC_MODE_HALF_DUPLEX);
1497 oldflowctrl = tp->link_config.active_flowctrl;
1503 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1504 mac_mode |= MAC_MODE_PORT_MODE_MII;
1505 else if (phydev->speed == SPEED_1000 ||
1506 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1507 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1509 mac_mode |= MAC_MODE_PORT_MODE_MII;
1511 if (phydev->duplex == DUPLEX_HALF)
1512 mac_mode |= MAC_MODE_HALF_DUPLEX;
1514 lcl_adv = tg3_advert_flowctrl_1000T(
1515 tp->link_config.flowctrl);
1518 rmt_adv = LPA_PAUSE_CAP;
1519 if (phydev->asym_pause)
1520 rmt_adv |= LPA_PAUSE_ASYM;
1523 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1525 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1527 if (mac_mode != tp->mac_mode) {
1528 tp->mac_mode = mac_mode;
1529 tw32_f(MAC_MODE, tp->mac_mode);
1533 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1534 if (phydev->speed == SPEED_10)
1536 MAC_MI_STAT_10MBPS_MODE |
1537 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1539 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1542 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1543 tw32(MAC_TX_LENGTHS,
1544 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1545 (6 << TX_LENGTHS_IPG_SHIFT) |
1546 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1548 tw32(MAC_TX_LENGTHS,
1549 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1550 (6 << TX_LENGTHS_IPG_SHIFT) |
1551 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1553 if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1554 (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1555 phydev->speed != tp->link_config.active_speed ||
1556 phydev->duplex != tp->link_config.active_duplex ||
1557 oldflowctrl != tp->link_config.active_flowctrl)
1560 tp->link_config.active_speed = phydev->speed;
1561 tp->link_config.active_duplex = phydev->duplex;
1563 spin_unlock_bh(&tp->lock);
1566 tg3_link_report(tp);
1569 static int tg3_phy_init(struct tg3 *tp)
1571 struct phy_device *phydev;
1573 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1576 /* Bring the PHY back to a known state. */
1579 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1581 /* Attach the MAC to the PHY. */
1582 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1583 phydev->dev_flags, phydev->interface);
1584 if (IS_ERR(phydev)) {
1585 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1586 return PTR_ERR(phydev);
1589 /* Mask with MAC supported features. */
1590 switch (phydev->interface) {
1591 case PHY_INTERFACE_MODE_GMII:
1592 case PHY_INTERFACE_MODE_RGMII:
1593 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1594 phydev->supported &= (PHY_GBIT_FEATURES |
1596 SUPPORTED_Asym_Pause);
1600 case PHY_INTERFACE_MODE_MII:
1601 phydev->supported &= (PHY_BASIC_FEATURES |
1603 SUPPORTED_Asym_Pause);
1606 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1610 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1612 phydev->advertising = phydev->supported;
1617 static void tg3_phy_start(struct tg3 *tp)
1619 struct phy_device *phydev;
1621 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1624 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1626 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1627 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1628 phydev->speed = tp->link_config.orig_speed;
1629 phydev->duplex = tp->link_config.orig_duplex;
1630 phydev->autoneg = tp->link_config.orig_autoneg;
1631 phydev->advertising = tp->link_config.orig_advertising;
1636 phy_start_aneg(phydev);
1639 static void tg3_phy_stop(struct tg3 *tp)
1641 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1644 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1647 static void tg3_phy_fini(struct tg3 *tp)
1649 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1650 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1651 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1655 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1659 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1662 tg3_writephy(tp, MII_TG3_FET_TEST,
1663 phytest | MII_TG3_FET_SHADOW_EN);
1664 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1666 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1668 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1669 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1671 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1675 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1679 if (!tg3_flag(tp, 5705_PLUS) ||
1680 (tg3_flag(tp, 5717_PLUS) &&
1681 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1684 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1685 tg3_phy_fet_toggle_apd(tp, enable);
1689 reg = MII_TG3_MISC_SHDW_WREN |
1690 MII_TG3_MISC_SHDW_SCR5_SEL |
1691 MII_TG3_MISC_SHDW_SCR5_LPED |
1692 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1693 MII_TG3_MISC_SHDW_SCR5_SDTL |
1694 MII_TG3_MISC_SHDW_SCR5_C125OE;
1695 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1696 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1698 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1701 reg = MII_TG3_MISC_SHDW_WREN |
1702 MII_TG3_MISC_SHDW_APD_SEL |
1703 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1705 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1707 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1710 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1714 if (!tg3_flag(tp, 5705_PLUS) ||
1715 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
1718 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1721 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1722 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1724 tg3_writephy(tp, MII_TG3_FET_TEST,
1725 ephy | MII_TG3_FET_SHADOW_EN);
1726 if (!tg3_readphy(tp, reg, &phy)) {
1728 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1730 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1731 tg3_writephy(tp, reg, phy);
1733 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1738 ret = tg3_phy_auxctl_read(tp,
1739 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
1742 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1744 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1745 tg3_phy_auxctl_write(tp,
1746 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
1751 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1756 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1759 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
1761 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
1762 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
1765 static void tg3_phy_apply_otp(struct tg3 *tp)
1774 if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
1777 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1778 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1779 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1781 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1782 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1783 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1785 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1786 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1787 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1789 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1790 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1792 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1793 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1795 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1796 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1797 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1799 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1802 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1806 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
1811 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1812 current_link_up == 1 &&
1813 tp->link_config.active_duplex == DUPLEX_FULL &&
1814 (tp->link_config.active_speed == SPEED_100 ||
1815 tp->link_config.active_speed == SPEED_1000)) {
1818 if (tp->link_config.active_speed == SPEED_1000)
1819 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
1821 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
1823 tw32(TG3_CPMU_EEE_CTRL, eeectl);
1825 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
1826 TG3_CL45_D7_EEERES_STAT, &val);
1828 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
1829 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
1833 if (!tp->setlpicnt) {
1834 val = tr32(TG3_CPMU_EEE_MODE);
1835 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
1839 static void tg3_phy_eee_enable(struct tg3 *tp)
1843 if (tp->link_config.active_speed == SPEED_1000 &&
1844 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1845 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
1846 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
1847 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1848 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0003);
1849 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1852 val = tr32(TG3_CPMU_EEE_MODE);
1853 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
1856 static int tg3_wait_macro_done(struct tg3 *tp)
1863 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1864 if ((tmp32 & 0x1000) == 0)
1874 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1876 static const u32 test_pat[4][6] = {
1877 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1878 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1879 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1880 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1884 for (chan = 0; chan < 4; chan++) {
1887 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1888 (chan * 0x2000) | 0x0200);
1889 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1891 for (i = 0; i < 6; i++)
1892 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1895 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1896 if (tg3_wait_macro_done(tp)) {
1901 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1902 (chan * 0x2000) | 0x0200);
1903 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1904 if (tg3_wait_macro_done(tp)) {
1909 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1910 if (tg3_wait_macro_done(tp)) {
1915 for (i = 0; i < 6; i += 2) {
1918 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1919 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1920 tg3_wait_macro_done(tp)) {
1926 if (low != test_pat[chan][i] ||
1927 high != test_pat[chan][i+1]) {
1928 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1929 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1930 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1940 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1944 for (chan = 0; chan < 4; chan++) {
1947 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1948 (chan * 0x2000) | 0x0200);
1949 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1950 for (i = 0; i < 6; i++)
1951 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1952 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1953 if (tg3_wait_macro_done(tp))
1960 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1962 u32 reg32, phy9_orig;
1963 int retries, do_phy_reset, err;
1969 err = tg3_bmcr_reset(tp);
1975 /* Disable transmitter and interrupt. */
1976 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32))
1980 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1982 /* Set full-duplex, 1000 mbps. */
1983 tg3_writephy(tp, MII_BMCR,
1984 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1986 /* Set to master mode. */
1987 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1990 tg3_writephy(tp, MII_TG3_CTRL,
1991 (MII_TG3_CTRL_AS_MASTER |
1992 MII_TG3_CTRL_ENABLE_AS_MASTER));
1994 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
1998 /* Block the PHY control access. */
1999 tg3_phydsp_write(tp, 0x8005, 0x0800);
2001 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2004 } while (--retries);
2006 err = tg3_phy_reset_chanpat(tp);
2010 tg3_phydsp_write(tp, 0x8005, 0x0000);
2012 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2013 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2015 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2017 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
2019 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32)) {
2021 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2028 /* This will reset the tigon3 PHY if there is no valid
2029 * link unless the FORCE argument is non-zero.
2031 static int tg3_phy_reset(struct tg3 *tp)
2036 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2037 val = tr32(GRC_MISC_CFG);
2038 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2041 err = tg3_readphy(tp, MII_BMSR, &val);
2042 err |= tg3_readphy(tp, MII_BMSR, &val);
2046 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2047 netif_carrier_off(tp->dev);
2048 tg3_link_report(tp);
2051 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2052 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2053 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2054 err = tg3_phy_reset_5703_4_5(tp);
2061 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2062 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2063 cpmuctrl = tr32(TG3_CPMU_CTRL);
2064 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2066 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2069 err = tg3_bmcr_reset(tp);
2073 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2074 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2075 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2077 tw32(TG3_CPMU_CTRL, cpmuctrl);
2080 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2081 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2082 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2083 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2084 CPMU_LSPD_1000MB_MACCLK_12_5) {
2085 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2087 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2091 if (tg3_flag(tp, 5717_PLUS) &&
2092 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2095 tg3_phy_apply_otp(tp);
2097 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2098 tg3_phy_toggle_apd(tp, true);
2100 tg3_phy_toggle_apd(tp, false);
2103 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2104 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2105 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2106 tg3_phydsp_write(tp, 0x000a, 0x0323);
2107 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2110 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2111 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2112 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2115 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2116 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2117 tg3_phydsp_write(tp, 0x000a, 0x310b);
2118 tg3_phydsp_write(tp, 0x201f, 0x9506);
2119 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2120 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2122 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2123 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2124 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2125 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2126 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2127 tg3_writephy(tp, MII_TG3_TEST1,
2128 MII_TG3_TEST1_TRIM_EN | 0x4);
2130 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2132 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2136 /* Set Extended packet length bit (bit 14) on all chips that */
2137 /* support jumbo frames */
2138 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2139 /* Cannot do read-modify-write on 5401 */
2140 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2141 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2142 /* Set bit 14 with read-modify-write to preserve other bits */
2143 err = tg3_phy_auxctl_read(tp,
2144 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2146 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2147 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2150 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2151 * jumbo frames transmission.
2153 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2154 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2155 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2156 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2159 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2160 /* adjust output voltage */
2161 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2164 tg3_phy_toggle_automdix(tp, 1);
2165 tg3_phy_set_wirespeed(tp);
2169 static void tg3_frob_aux_power(struct tg3 *tp)
2171 bool need_vaux = false;
2173 /* The GPIOs do something completely different on 57765. */
2174 if (!tg3_flag(tp, IS_NIC) ||
2175 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2176 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2179 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2180 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2181 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2182 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
2183 tp->pdev_peer != tp->pdev) {
2184 struct net_device *dev_peer;
2186 dev_peer = pci_get_drvdata(tp->pdev_peer);
2188 /* remove_one() may have been run on the peer. */
2190 struct tg3 *tp_peer = netdev_priv(dev_peer);
2192 if (tg3_flag(tp_peer, INIT_COMPLETE))
2195 if (tg3_flag(tp_peer, WOL_ENABLE) ||
2196 tg3_flag(tp_peer, ENABLE_ASF))
2201 if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
2205 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2206 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2207 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2208 (GRC_LCLCTRL_GPIO_OE0 |
2209 GRC_LCLCTRL_GPIO_OE1 |
2210 GRC_LCLCTRL_GPIO_OE2 |
2211 GRC_LCLCTRL_GPIO_OUTPUT0 |
2212 GRC_LCLCTRL_GPIO_OUTPUT1),
2214 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2215 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2216 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2217 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2218 GRC_LCLCTRL_GPIO_OE1 |
2219 GRC_LCLCTRL_GPIO_OE2 |
2220 GRC_LCLCTRL_GPIO_OUTPUT0 |
2221 GRC_LCLCTRL_GPIO_OUTPUT1 |
2223 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2225 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2226 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2228 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2229 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2232 u32 grc_local_ctrl = 0;
2234 /* Workaround to prevent overdrawing Amps. */
2235 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2237 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2238 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2239 grc_local_ctrl, 100);
2242 /* On 5753 and variants, GPIO2 cannot be used. */
2243 no_gpio2 = tp->nic_sram_data_cfg &
2244 NIC_SRAM_DATA_CFG_NO_GPIO2;
2246 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2247 GRC_LCLCTRL_GPIO_OE1 |
2248 GRC_LCLCTRL_GPIO_OE2 |
2249 GRC_LCLCTRL_GPIO_OUTPUT1 |
2250 GRC_LCLCTRL_GPIO_OUTPUT2;
2252 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2253 GRC_LCLCTRL_GPIO_OUTPUT2);
2255 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2256 grc_local_ctrl, 100);
2258 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2260 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2261 grc_local_ctrl, 100);
2264 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2265 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2266 grc_local_ctrl, 100);
2270 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2271 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2272 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2273 (GRC_LCLCTRL_GPIO_OE1 |
2274 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2276 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2277 GRC_LCLCTRL_GPIO_OE1, 100);
2279 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2280 (GRC_LCLCTRL_GPIO_OE1 |
2281 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2286 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2288 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2290 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2291 if (speed != SPEED_10)
2293 } else if (speed == SPEED_10)
2299 static int tg3_setup_phy(struct tg3 *, int);
2301 #define RESET_KIND_SHUTDOWN 0
2302 #define RESET_KIND_INIT 1
2303 #define RESET_KIND_SUSPEND 2
2305 static void tg3_write_sig_post_reset(struct tg3 *, int);
2306 static int tg3_halt_cpu(struct tg3 *, u32);
2308 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2312 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2313 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2314 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2315 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2318 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2319 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2320 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2325 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2327 val = tr32(GRC_MISC_CFG);
2328 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2331 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2333 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2336 tg3_writephy(tp, MII_ADVERTISE, 0);
2337 tg3_writephy(tp, MII_BMCR,
2338 BMCR_ANENABLE | BMCR_ANRESTART);
2340 tg3_writephy(tp, MII_TG3_FET_TEST,
2341 phytest | MII_TG3_FET_SHADOW_EN);
2342 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2343 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2345 MII_TG3_FET_SHDW_AUXMODE4,
2348 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2351 } else if (do_low_power) {
2352 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2353 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2355 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2356 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2357 MII_TG3_AUXCTL_PCTL_VREG_11V;
2358 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2361 /* The PHY should not be powered down on some chips because
2364 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2365 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2366 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2367 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2370 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2371 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2372 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2373 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2374 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2375 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2378 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2381 /* tp->lock is held. */
2382 static int tg3_nvram_lock(struct tg3 *tp)
2384 if (tg3_flag(tp, NVRAM)) {
2387 if (tp->nvram_lock_cnt == 0) {
2388 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2389 for (i = 0; i < 8000; i++) {
2390 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2395 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2399 tp->nvram_lock_cnt++;
2404 /* tp->lock is held. */
2405 static void tg3_nvram_unlock(struct tg3 *tp)
2407 if (tg3_flag(tp, NVRAM)) {
2408 if (tp->nvram_lock_cnt > 0)
2409 tp->nvram_lock_cnt--;
2410 if (tp->nvram_lock_cnt == 0)
2411 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2415 /* tp->lock is held. */
2416 static void tg3_enable_nvram_access(struct tg3 *tp)
2418 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2419 u32 nvaccess = tr32(NVRAM_ACCESS);
2421 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2425 /* tp->lock is held. */
2426 static void tg3_disable_nvram_access(struct tg3 *tp)
2428 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2429 u32 nvaccess = tr32(NVRAM_ACCESS);
2431 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2435 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2436 u32 offset, u32 *val)
2441 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2444 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2445 EEPROM_ADDR_DEVID_MASK |
2447 tw32(GRC_EEPROM_ADDR,
2449 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2450 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2451 EEPROM_ADDR_ADDR_MASK) |
2452 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2454 for (i = 0; i < 1000; i++) {
2455 tmp = tr32(GRC_EEPROM_ADDR);
2457 if (tmp & EEPROM_ADDR_COMPLETE)
2461 if (!(tmp & EEPROM_ADDR_COMPLETE))
2464 tmp = tr32(GRC_EEPROM_DATA);
2467 * The data will always be opposite the native endian
2468 * format. Perform a blind byteswap to compensate.
2475 #define NVRAM_CMD_TIMEOUT 10000
2477 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2481 tw32(NVRAM_CMD, nvram_cmd);
2482 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2484 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2490 if (i == NVRAM_CMD_TIMEOUT)
2496 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2498 if (tg3_flag(tp, NVRAM) &&
2499 tg3_flag(tp, NVRAM_BUFFERED) &&
2500 tg3_flag(tp, FLASH) &&
2501 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2502 (tp->nvram_jedecnum == JEDEC_ATMEL))
2504 addr = ((addr / tp->nvram_pagesize) <<
2505 ATMEL_AT45DB0X1B_PAGE_POS) +
2506 (addr % tp->nvram_pagesize);
2511 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2513 if (tg3_flag(tp, NVRAM) &&
2514 tg3_flag(tp, NVRAM_BUFFERED) &&
2515 tg3_flag(tp, FLASH) &&
2516 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2517 (tp->nvram_jedecnum == JEDEC_ATMEL))
2519 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2520 tp->nvram_pagesize) +
2521 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2526 /* NOTE: Data read in from NVRAM is byteswapped according to
2527 * the byteswapping settings for all other register accesses.
2528 * tg3 devices are BE devices, so on a BE machine, the data
2529 * returned will be exactly as it is seen in NVRAM. On a LE
2530 * machine, the 32-bit value will be byteswapped.
2532 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2536 if (!tg3_flag(tp, NVRAM))
2537 return tg3_nvram_read_using_eeprom(tp, offset, val);
2539 offset = tg3_nvram_phys_addr(tp, offset);
2541 if (offset > NVRAM_ADDR_MSK)
2544 ret = tg3_nvram_lock(tp);
2548 tg3_enable_nvram_access(tp);
2550 tw32(NVRAM_ADDR, offset);
2551 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2552 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2555 *val = tr32(NVRAM_RDDATA);
2557 tg3_disable_nvram_access(tp);
2559 tg3_nvram_unlock(tp);
2564 /* Ensures NVRAM data is in bytestream format. */
2565 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2568 int res = tg3_nvram_read(tp, offset, &v);
2570 *val = cpu_to_be32(v);
2574 /* tp->lock is held. */
2575 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2577 u32 addr_high, addr_low;
2580 addr_high = ((tp->dev->dev_addr[0] << 8) |
2581 tp->dev->dev_addr[1]);
2582 addr_low = ((tp->dev->dev_addr[2] << 24) |
2583 (tp->dev->dev_addr[3] << 16) |
2584 (tp->dev->dev_addr[4] << 8) |
2585 (tp->dev->dev_addr[5] << 0));
2586 for (i = 0; i < 4; i++) {
2587 if (i == 1 && skip_mac_1)
2589 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2590 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2593 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2594 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2595 for (i = 0; i < 12; i++) {
2596 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2597 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2601 addr_high = (tp->dev->dev_addr[0] +
2602 tp->dev->dev_addr[1] +
2603 tp->dev->dev_addr[2] +
2604 tp->dev->dev_addr[3] +
2605 tp->dev->dev_addr[4] +
2606 tp->dev->dev_addr[5]) &
2607 TX_BACKOFF_SEED_MASK;
2608 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2611 static void tg3_enable_register_access(struct tg3 *tp)
2614 * Make sure register accesses (indirect or otherwise) will function
2617 pci_write_config_dword(tp->pdev,
2618 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2621 static int tg3_power_up(struct tg3 *tp)
2623 tg3_enable_register_access(tp);
2625 pci_set_power_state(tp->pdev, PCI_D0);
2627 /* Switch out of Vaux if it is a NIC */
2628 if (tg3_flag(tp, IS_NIC))
2629 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2634 static int tg3_power_down_prepare(struct tg3 *tp)
2637 bool device_should_wake, do_low_power;
2639 tg3_enable_register_access(tp);
2641 /* Restore the CLKREQ setting. */
2642 if (tg3_flag(tp, CLKREQ_BUG)) {
2645 pci_read_config_word(tp->pdev,
2646 tp->pcie_cap + PCI_EXP_LNKCTL,
2648 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2649 pci_write_config_word(tp->pdev,
2650 tp->pcie_cap + PCI_EXP_LNKCTL,
2654 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2655 tw32(TG3PCI_MISC_HOST_CTRL,
2656 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2658 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2659 tg3_flag(tp, WOL_ENABLE);
2661 if (tg3_flag(tp, USE_PHYLIB)) {
2662 do_low_power = false;
2663 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2664 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2665 struct phy_device *phydev;
2666 u32 phyid, advertising;
2668 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2670 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2672 tp->link_config.orig_speed = phydev->speed;
2673 tp->link_config.orig_duplex = phydev->duplex;
2674 tp->link_config.orig_autoneg = phydev->autoneg;
2675 tp->link_config.orig_advertising = phydev->advertising;
2677 advertising = ADVERTISED_TP |
2679 ADVERTISED_Autoneg |
2680 ADVERTISED_10baseT_Half;
2682 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2683 if (tg3_flag(tp, WOL_SPEED_100MB))
2685 ADVERTISED_100baseT_Half |
2686 ADVERTISED_100baseT_Full |
2687 ADVERTISED_10baseT_Full;
2689 advertising |= ADVERTISED_10baseT_Full;
2692 phydev->advertising = advertising;
2694 phy_start_aneg(phydev);
2696 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2697 if (phyid != PHY_ID_BCMAC131) {
2698 phyid &= PHY_BCM_OUI_MASK;
2699 if (phyid == PHY_BCM_OUI_1 ||
2700 phyid == PHY_BCM_OUI_2 ||
2701 phyid == PHY_BCM_OUI_3)
2702 do_low_power = true;
2706 do_low_power = true;
2708 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2709 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2710 tp->link_config.orig_speed = tp->link_config.speed;
2711 tp->link_config.orig_duplex = tp->link_config.duplex;
2712 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2715 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2716 tp->link_config.speed = SPEED_10;
2717 tp->link_config.duplex = DUPLEX_HALF;
2718 tp->link_config.autoneg = AUTONEG_ENABLE;
2719 tg3_setup_phy(tp, 0);
2723 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2726 val = tr32(GRC_VCPU_EXT_CTRL);
2727 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2728 } else if (!tg3_flag(tp, ENABLE_ASF)) {
2732 for (i = 0; i < 200; i++) {
2733 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2734 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2739 if (tg3_flag(tp, WOL_CAP))
2740 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2741 WOL_DRV_STATE_SHUTDOWN |
2745 if (device_should_wake) {
2748 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2750 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2751 tg3_phy_auxctl_write(tp,
2752 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2753 MII_TG3_AUXCTL_PCTL_WOL_EN |
2754 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2755 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2759 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2760 mac_mode = MAC_MODE_PORT_MODE_GMII;
2762 mac_mode = MAC_MODE_PORT_MODE_MII;
2764 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2765 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2767 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2768 SPEED_100 : SPEED_10;
2769 if (tg3_5700_link_polarity(tp, speed))
2770 mac_mode |= MAC_MODE_LINK_POLARITY;
2772 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2775 mac_mode = MAC_MODE_PORT_MODE_TBI;
2778 if (!tg3_flag(tp, 5750_PLUS))
2779 tw32(MAC_LED_CTRL, tp->led_ctrl);
2781 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2782 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2783 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2784 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2786 if (tg3_flag(tp, ENABLE_APE))
2787 mac_mode |= MAC_MODE_APE_TX_EN |
2788 MAC_MODE_APE_RX_EN |
2789 MAC_MODE_TDE_ENABLE;
2791 tw32_f(MAC_MODE, mac_mode);
2794 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2798 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2799 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2800 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2803 base_val = tp->pci_clock_ctrl;
2804 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2805 CLOCK_CTRL_TXCLK_DISABLE);
2807 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2808 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2809 } else if (tg3_flag(tp, 5780_CLASS) ||
2810 tg3_flag(tp, CPMU_PRESENT) ||
2811 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2813 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2814 u32 newbits1, newbits2;
2816 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2817 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2818 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2819 CLOCK_CTRL_TXCLK_DISABLE |
2821 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2822 } else if (tg3_flag(tp, 5705_PLUS)) {
2823 newbits1 = CLOCK_CTRL_625_CORE;
2824 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2826 newbits1 = CLOCK_CTRL_ALTCLK;
2827 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2830 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2833 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2836 if (!tg3_flag(tp, 5705_PLUS)) {
2839 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2840 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2841 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2842 CLOCK_CTRL_TXCLK_DISABLE |
2843 CLOCK_CTRL_44MHZ_CORE);
2845 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2848 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2849 tp->pci_clock_ctrl | newbits3, 40);
2853 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
2854 tg3_power_down_phy(tp, do_low_power);
2856 tg3_frob_aux_power(tp);
2858 /* Workaround for unstable PLL clock */
2859 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2860 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2861 u32 val = tr32(0x7d00);
2863 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2865 if (!tg3_flag(tp, ENABLE_ASF)) {
2868 err = tg3_nvram_lock(tp);
2869 tg3_halt_cpu(tp, RX_CPU_BASE);
2871 tg3_nvram_unlock(tp);
2875 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2880 static void tg3_power_down(struct tg3 *tp)
2882 tg3_power_down_prepare(tp);
2884 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
2885 pci_set_power_state(tp->pdev, PCI_D3hot);
2888 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2890 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2891 case MII_TG3_AUX_STAT_10HALF:
2893 *duplex = DUPLEX_HALF;
2896 case MII_TG3_AUX_STAT_10FULL:
2898 *duplex = DUPLEX_FULL;
2901 case MII_TG3_AUX_STAT_100HALF:
2903 *duplex = DUPLEX_HALF;
2906 case MII_TG3_AUX_STAT_100FULL:
2908 *duplex = DUPLEX_FULL;
2911 case MII_TG3_AUX_STAT_1000HALF:
2912 *speed = SPEED_1000;
2913 *duplex = DUPLEX_HALF;
2916 case MII_TG3_AUX_STAT_1000FULL:
2917 *speed = SPEED_1000;
2918 *duplex = DUPLEX_FULL;
2922 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2923 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2925 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2929 *speed = SPEED_INVALID;
2930 *duplex = DUPLEX_INVALID;
2935 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
2940 new_adv = ADVERTISE_CSMA;
2941 if (advertise & ADVERTISED_10baseT_Half)
2942 new_adv |= ADVERTISE_10HALF;
2943 if (advertise & ADVERTISED_10baseT_Full)
2944 new_adv |= ADVERTISE_10FULL;
2945 if (advertise & ADVERTISED_100baseT_Half)
2946 new_adv |= ADVERTISE_100HALF;
2947 if (advertise & ADVERTISED_100baseT_Full)
2948 new_adv |= ADVERTISE_100FULL;
2950 new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
2952 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
2956 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
2960 if (advertise & ADVERTISED_1000baseT_Half)
2961 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2962 if (advertise & ADVERTISED_1000baseT_Full)
2963 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2965 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2966 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2967 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2968 MII_TG3_CTRL_ENABLE_AS_MASTER);
2970 err = tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2974 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2977 tw32(TG3_CPMU_EEE_MODE,
2978 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2980 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2984 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
2986 case ASIC_REV_57765:
2987 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
2988 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
2989 MII_TG3_DSP_CH34TP2_HIBW01);
2992 val = MII_TG3_DSP_TAP26_ALNOKO |
2993 MII_TG3_DSP_TAP26_RMRXSTO |
2994 MII_TG3_DSP_TAP26_OPCSINPT;
2995 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2999 /* Advertise 100-BaseTX EEE ability */
3000 if (advertise & ADVERTISED_100baseT_Full)
3001 val |= MDIO_AN_EEE_ADV_100TX;
3002 /* Advertise 1000-BaseT EEE ability */
3003 if (advertise & ADVERTISED_1000baseT_Full)
3004 val |= MDIO_AN_EEE_ADV_1000T;
3005 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3007 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3016 static void tg3_phy_copper_begin(struct tg3 *tp)
3021 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3022 new_adv = ADVERTISED_10baseT_Half |
3023 ADVERTISED_10baseT_Full;
3024 if (tg3_flag(tp, WOL_SPEED_100MB))
3025 new_adv |= ADVERTISED_100baseT_Half |
3026 ADVERTISED_100baseT_Full;
3028 tg3_phy_autoneg_cfg(tp, new_adv,
3029 FLOW_CTRL_TX | FLOW_CTRL_RX);
3030 } else if (tp->link_config.speed == SPEED_INVALID) {
3031 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3032 tp->link_config.advertising &=
3033 ~(ADVERTISED_1000baseT_Half |
3034 ADVERTISED_1000baseT_Full);
3036 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3037 tp->link_config.flowctrl);
3039 /* Asking for a specific link mode. */
3040 if (tp->link_config.speed == SPEED_1000) {
3041 if (tp->link_config.duplex == DUPLEX_FULL)
3042 new_adv = ADVERTISED_1000baseT_Full;
3044 new_adv = ADVERTISED_1000baseT_Half;
3045 } else if (tp->link_config.speed == SPEED_100) {
3046 if (tp->link_config.duplex == DUPLEX_FULL)
3047 new_adv = ADVERTISED_100baseT_Full;
3049 new_adv = ADVERTISED_100baseT_Half;
3051 if (tp->link_config.duplex == DUPLEX_FULL)
3052 new_adv = ADVERTISED_10baseT_Full;
3054 new_adv = ADVERTISED_10baseT_Half;
3057 tg3_phy_autoneg_cfg(tp, new_adv,
3058 tp->link_config.flowctrl);
3061 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3062 tp->link_config.speed != SPEED_INVALID) {
3063 u32 bmcr, orig_bmcr;
3065 tp->link_config.active_speed = tp->link_config.speed;
3066 tp->link_config.active_duplex = tp->link_config.duplex;
3069 switch (tp->link_config.speed) {
3075 bmcr |= BMCR_SPEED100;
3079 bmcr |= TG3_BMCR_SPEED1000;
3083 if (tp->link_config.duplex == DUPLEX_FULL)
3084 bmcr |= BMCR_FULLDPLX;
3086 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3087 (bmcr != orig_bmcr)) {
3088 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3089 for (i = 0; i < 1500; i++) {
3093 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3094 tg3_readphy(tp, MII_BMSR, &tmp))
3096 if (!(tmp & BMSR_LSTATUS)) {
3101 tg3_writephy(tp, MII_BMCR, bmcr);
3105 tg3_writephy(tp, MII_BMCR,
3106 BMCR_ANENABLE | BMCR_ANRESTART);
3110 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3114 /* Turn off tap power management. */
3115 /* Set Extended packet length bit */
3116 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3118 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3119 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3120 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3121 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3122 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3129 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3131 u32 adv_reg, all_mask = 0;
3133 if (mask & ADVERTISED_10baseT_Half)
3134 all_mask |= ADVERTISE_10HALF;
3135 if (mask & ADVERTISED_10baseT_Full)
3136 all_mask |= ADVERTISE_10FULL;
3137 if (mask & ADVERTISED_100baseT_Half)
3138 all_mask |= ADVERTISE_100HALF;
3139 if (mask & ADVERTISED_100baseT_Full)
3140 all_mask |= ADVERTISE_100FULL;
3142 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3145 if ((adv_reg & all_mask) != all_mask)
3147 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3151 if (mask & ADVERTISED_1000baseT_Half)
3152 all_mask |= ADVERTISE_1000HALF;
3153 if (mask & ADVERTISED_1000baseT_Full)
3154 all_mask |= ADVERTISE_1000FULL;
3156 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
3159 if ((tg3_ctrl & all_mask) != all_mask)
3165 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3169 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3172 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3173 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3175 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3176 if (curadv != reqadv)
3179 if (tg3_flag(tp, PAUSE_AUTONEG))
3180 tg3_readphy(tp, MII_LPA, rmtadv);
3182 /* Reprogram the advertisement register, even if it
3183 * does not affect the current link. If the link
3184 * gets renegotiated in the future, we can save an
3185 * additional renegotiation cycle by advertising
3186 * it correctly in the first place.
3188 if (curadv != reqadv) {
3189 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3190 ADVERTISE_PAUSE_ASYM);
3191 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3198 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3200 int current_link_up;
3202 u32 lcl_adv, rmt_adv;
3210 (MAC_STATUS_SYNC_CHANGED |
3211 MAC_STATUS_CFG_CHANGED |
3212 MAC_STATUS_MI_COMPLETION |
3213 MAC_STATUS_LNKSTATE_CHANGED));
3216 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3218 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3222 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3224 /* Some third-party PHYs need to be reset on link going
3227 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3228 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3229 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3230 netif_carrier_ok(tp->dev)) {
3231 tg3_readphy(tp, MII_BMSR, &bmsr);
3232 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3233 !(bmsr & BMSR_LSTATUS))
3239 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3240 tg3_readphy(tp, MII_BMSR, &bmsr);
3241 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3242 !tg3_flag(tp, INIT_COMPLETE))
3245 if (!(bmsr & BMSR_LSTATUS)) {
3246 err = tg3_init_5401phy_dsp(tp);
3250 tg3_readphy(tp, MII_BMSR, &bmsr);
3251 for (i = 0; i < 1000; i++) {
3253 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3254 (bmsr & BMSR_LSTATUS)) {
3260 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3261 TG3_PHY_REV_BCM5401_B0 &&
3262 !(bmsr & BMSR_LSTATUS) &&
3263 tp->link_config.active_speed == SPEED_1000) {
3264 err = tg3_phy_reset(tp);
3266 err = tg3_init_5401phy_dsp(tp);
3271 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3272 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3273 /* 5701 {A0,B0} CRC bug workaround */
3274 tg3_writephy(tp, 0x15, 0x0a75);
3275 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3276 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3277 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3280 /* Clear pending interrupts... */
3281 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3282 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3284 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3285 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3286 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3287 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3289 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3290 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3291 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3292 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3293 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3295 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3298 current_link_up = 0;
3299 current_speed = SPEED_INVALID;
3300 current_duplex = DUPLEX_INVALID;
3302 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3303 err = tg3_phy_auxctl_read(tp,
3304 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3306 if (!err && !(val & (1 << 10))) {
3307 tg3_phy_auxctl_write(tp,
3308 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3315 for (i = 0; i < 100; i++) {
3316 tg3_readphy(tp, MII_BMSR, &bmsr);
3317 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3318 (bmsr & BMSR_LSTATUS))
3323 if (bmsr & BMSR_LSTATUS) {
3326 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3327 for (i = 0; i < 2000; i++) {
3329 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3334 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3339 for (i = 0; i < 200; i++) {
3340 tg3_readphy(tp, MII_BMCR, &bmcr);
3341 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3343 if (bmcr && bmcr != 0x7fff)
3351 tp->link_config.active_speed = current_speed;
3352 tp->link_config.active_duplex = current_duplex;
3354 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3355 if ((bmcr & BMCR_ANENABLE) &&
3356 tg3_copper_is_advertising_all(tp,
3357 tp->link_config.advertising)) {
3358 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3360 current_link_up = 1;
3363 if (!(bmcr & BMCR_ANENABLE) &&
3364 tp->link_config.speed == current_speed &&
3365 tp->link_config.duplex == current_duplex &&
3366 tp->link_config.flowctrl ==
3367 tp->link_config.active_flowctrl) {
3368 current_link_up = 1;
3372 if (current_link_up == 1 &&
3373 tp->link_config.active_duplex == DUPLEX_FULL)
3374 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3378 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3379 tg3_phy_copper_begin(tp);
3381 tg3_readphy(tp, MII_BMSR, &bmsr);
3382 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3383 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3384 current_link_up = 1;
3387 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3388 if (current_link_up == 1) {
3389 if (tp->link_config.active_speed == SPEED_100 ||
3390 tp->link_config.active_speed == SPEED_10)
3391 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3393 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3394 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3395 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3397 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3399 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3400 if (tp->link_config.active_duplex == DUPLEX_HALF)
3401 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3403 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3404 if (current_link_up == 1 &&
3405 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3406 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3408 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3411 /* ??? Without this setting Netgear GA302T PHY does not
3412 * ??? send/receive packets...
3414 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3415 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3416 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3417 tw32_f(MAC_MI_MODE, tp->mi_mode);
3421 tw32_f(MAC_MODE, tp->mac_mode);
3424 tg3_phy_eee_adjust(tp, current_link_up);
3426 if (tg3_flag(tp, USE_LINKCHG_REG)) {
3427 /* Polled via timer. */
3428 tw32_f(MAC_EVENT, 0);
3430 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3434 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3435 current_link_up == 1 &&
3436 tp->link_config.active_speed == SPEED_1000 &&
3437 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3440 (MAC_STATUS_SYNC_CHANGED |
3441 MAC_STATUS_CFG_CHANGED));
3444 NIC_SRAM_FIRMWARE_MBOX,
3445 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3448 /* Prevent send BD corruption. */
3449 if (tg3_flag(tp, CLKREQ_BUG)) {
3450 u16 oldlnkctl, newlnkctl;
3452 pci_read_config_word(tp->pdev,
3453 tp->pcie_cap + PCI_EXP_LNKCTL,
3455 if (tp->link_config.active_speed == SPEED_100 ||
3456 tp->link_config.active_speed == SPEED_10)
3457 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3459 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3460 if (newlnkctl != oldlnkctl)
3461 pci_write_config_word(tp->pdev,
3462 tp->pcie_cap + PCI_EXP_LNKCTL,
3466 if (current_link_up != netif_carrier_ok(tp->dev)) {
3467 if (current_link_up)
3468 netif_carrier_on(tp->dev);
3470 netif_carrier_off(tp->dev);
3471 tg3_link_report(tp);
3477 struct tg3_fiber_aneginfo {
3479 #define ANEG_STATE_UNKNOWN 0
3480 #define ANEG_STATE_AN_ENABLE 1
3481 #define ANEG_STATE_RESTART_INIT 2
3482 #define ANEG_STATE_RESTART 3
3483 #define ANEG_STATE_DISABLE_LINK_OK 4
3484 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3485 #define ANEG_STATE_ABILITY_DETECT 6
3486 #define ANEG_STATE_ACK_DETECT_INIT 7
3487 #define ANEG_STATE_ACK_DETECT 8
3488 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3489 #define ANEG_STATE_COMPLETE_ACK 10
3490 #define ANEG_STATE_IDLE_DETECT_INIT 11
3491 #define ANEG_STATE_IDLE_DETECT 12
3492 #define ANEG_STATE_LINK_OK 13
3493 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3494 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3497 #define MR_AN_ENABLE 0x00000001
3498 #define MR_RESTART_AN 0x00000002
3499 #define MR_AN_COMPLETE 0x00000004
3500 #define MR_PAGE_RX 0x00000008
3501 #define MR_NP_LOADED 0x00000010
3502 #define MR_TOGGLE_TX 0x00000020
3503 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3504 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3505 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3506 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3507 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3508 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3509 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3510 #define MR_TOGGLE_RX 0x00002000
3511 #define MR_NP_RX 0x00004000
3513 #define MR_LINK_OK 0x80000000
3515 unsigned long link_time, cur_time;
3517 u32 ability_match_cfg;
3518 int ability_match_count;
3520 char ability_match, idle_match, ack_match;
3522 u32 txconfig, rxconfig;
3523 #define ANEG_CFG_NP 0x00000080
3524 #define ANEG_CFG_ACK 0x00000040
3525 #define ANEG_CFG_RF2 0x00000020
3526 #define ANEG_CFG_RF1 0x00000010
3527 #define ANEG_CFG_PS2 0x00000001
3528 #define ANEG_CFG_PS1 0x00008000
3529 #define ANEG_CFG_HD 0x00004000
3530 #define ANEG_CFG_FD 0x00002000
3531 #define ANEG_CFG_INVAL 0x00001f06
3536 #define ANEG_TIMER_ENAB 2
3537 #define ANEG_FAILED -1
3539 #define ANEG_STATE_SETTLE_TIME 10000
3541 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3542 struct tg3_fiber_aneginfo *ap)
3545 unsigned long delta;
3549 if (ap->state == ANEG_STATE_UNKNOWN) {
3553 ap->ability_match_cfg = 0;
3554 ap->ability_match_count = 0;
3555 ap->ability_match = 0;
3561 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3562 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3564 if (rx_cfg_reg != ap->ability_match_cfg) {
3565 ap->ability_match_cfg = rx_cfg_reg;
3566 ap->ability_match = 0;
3567 ap->ability_match_count = 0;
3569 if (++ap->ability_match_count > 1) {
3570 ap->ability_match = 1;
3571 ap->ability_match_cfg = rx_cfg_reg;
3574 if (rx_cfg_reg & ANEG_CFG_ACK)
3582 ap->ability_match_cfg = 0;
3583 ap->ability_match_count = 0;
3584 ap->ability_match = 0;
3590 ap->rxconfig = rx_cfg_reg;
3593 switch (ap->state) {
3594 case ANEG_STATE_UNKNOWN:
3595 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3596 ap->state = ANEG_STATE_AN_ENABLE;
3599 case ANEG_STATE_AN_ENABLE:
3600 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3601 if (ap->flags & MR_AN_ENABLE) {
3604 ap->ability_match_cfg = 0;
3605 ap->ability_match_count = 0;
3606 ap->ability_match = 0;
3610 ap->state = ANEG_STATE_RESTART_INIT;
3612 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3616 case ANEG_STATE_RESTART_INIT:
3617 ap->link_time = ap->cur_time;
3618 ap->flags &= ~(MR_NP_LOADED);
3620 tw32(MAC_TX_AUTO_NEG, 0);
3621 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3622 tw32_f(MAC_MODE, tp->mac_mode);
3625 ret = ANEG_TIMER_ENAB;
3626 ap->state = ANEG_STATE_RESTART;
3629 case ANEG_STATE_RESTART:
3630 delta = ap->cur_time - ap->link_time;
3631 if (delta > ANEG_STATE_SETTLE_TIME)
3632 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3634 ret = ANEG_TIMER_ENAB;
3637 case ANEG_STATE_DISABLE_LINK_OK:
3641 case ANEG_STATE_ABILITY_DETECT_INIT:
3642 ap->flags &= ~(MR_TOGGLE_TX);
3643 ap->txconfig = ANEG_CFG_FD;
3644 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3645 if (flowctrl & ADVERTISE_1000XPAUSE)
3646 ap->txconfig |= ANEG_CFG_PS1;
3647 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3648 ap->txconfig |= ANEG_CFG_PS2;
3649 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3650 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3651 tw32_f(MAC_MODE, tp->mac_mode);
3654 ap->state = ANEG_STATE_ABILITY_DETECT;
3657 case ANEG_STATE_ABILITY_DETECT:
3658 if (ap->ability_match != 0 && ap->rxconfig != 0)
3659 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3662 case ANEG_STATE_ACK_DETECT_INIT:
3663 ap->txconfig |= ANEG_CFG_ACK;
3664 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3665 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3666 tw32_f(MAC_MODE, tp->mac_mode);
3669 ap->state = ANEG_STATE_ACK_DETECT;
3672 case ANEG_STATE_ACK_DETECT:
3673 if (ap->ack_match != 0) {
3674 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3675 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3676 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3678 ap->state = ANEG_STATE_AN_ENABLE;
3680 } else if (ap->ability_match != 0 &&
3681 ap->rxconfig == 0) {
3682 ap->state = ANEG_STATE_AN_ENABLE;
3686 case ANEG_STATE_COMPLETE_ACK_INIT:
3687 if (ap->rxconfig & ANEG_CFG_INVAL) {
3691 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3692 MR_LP_ADV_HALF_DUPLEX |
3693 MR_LP_ADV_SYM_PAUSE |
3694 MR_LP_ADV_ASYM_PAUSE |
3695 MR_LP_ADV_REMOTE_FAULT1 |
3696 MR_LP_ADV_REMOTE_FAULT2 |
3697 MR_LP_ADV_NEXT_PAGE |
3700 if (ap->rxconfig & ANEG_CFG_FD)
3701 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3702 if (ap->rxconfig & ANEG_CFG_HD)
3703 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3704 if (ap->rxconfig & ANEG_CFG_PS1)
3705 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3706 if (ap->rxconfig & ANEG_CFG_PS2)
3707 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3708 if (ap->rxconfig & ANEG_CFG_RF1)
3709 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3710 if (ap->rxconfig & ANEG_CFG_RF2)
3711 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3712 if (ap->rxconfig & ANEG_CFG_NP)
3713 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3715 ap->link_time = ap->cur_time;
3717 ap->flags ^= (MR_TOGGLE_TX);
3718 if (ap->rxconfig & 0x0008)
3719 ap->flags |= MR_TOGGLE_RX;
3720 if (ap->rxconfig & ANEG_CFG_NP)
3721 ap->flags |= MR_NP_RX;
3722 ap->flags |= MR_PAGE_RX;
3724 ap->state = ANEG_STATE_COMPLETE_ACK;
3725 ret = ANEG_TIMER_ENAB;
3728 case ANEG_STATE_COMPLETE_ACK:
3729 if (ap->ability_match != 0 &&
3730 ap->rxconfig == 0) {
3731 ap->state = ANEG_STATE_AN_ENABLE;
3734 delta = ap->cur_time - ap->link_time;
3735 if (delta > ANEG_STATE_SETTLE_TIME) {
3736 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3737 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3739 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3740 !(ap->flags & MR_NP_RX)) {
3741 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3749 case ANEG_STATE_IDLE_DETECT_INIT:
3750 ap->link_time = ap->cur_time;
3751 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3752 tw32_f(MAC_MODE, tp->mac_mode);
3755 ap->state = ANEG_STATE_IDLE_DETECT;
3756 ret = ANEG_TIMER_ENAB;
3759 case ANEG_STATE_IDLE_DETECT:
3760 if (ap->ability_match != 0 &&
3761 ap->rxconfig == 0) {
3762 ap->state = ANEG_STATE_AN_ENABLE;
3765 delta = ap->cur_time - ap->link_time;
3766 if (delta > ANEG_STATE_SETTLE_TIME) {
3767 /* XXX another gem from the Broadcom driver :( */
3768 ap->state = ANEG_STATE_LINK_OK;
3772 case ANEG_STATE_LINK_OK:
3773 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3777 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3778 /* ??? unimplemented */
3781 case ANEG_STATE_NEXT_PAGE_WAIT:
3782 /* ??? unimplemented */
3793 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3796 struct tg3_fiber_aneginfo aninfo;
3797 int status = ANEG_FAILED;
3801 tw32_f(MAC_TX_AUTO_NEG, 0);
3803 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3804 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3807 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3810 memset(&aninfo, 0, sizeof(aninfo));
3811 aninfo.flags |= MR_AN_ENABLE;
3812 aninfo.state = ANEG_STATE_UNKNOWN;
3813 aninfo.cur_time = 0;
3815 while (++tick < 195000) {
3816 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3817 if (status == ANEG_DONE || status == ANEG_FAILED)
3823 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3824 tw32_f(MAC_MODE, tp->mac_mode);
3827 *txflags = aninfo.txconfig;
3828 *rxflags = aninfo.flags;
3830 if (status == ANEG_DONE &&
3831 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3832 MR_LP_ADV_FULL_DUPLEX)))
3838 static void tg3_init_bcm8002(struct tg3 *tp)
3840 u32 mac_status = tr32(MAC_STATUS);
3843 /* Reset when initting first time or we have a link. */
3844 if (tg3_flag(tp, INIT_COMPLETE) &&
3845 !(mac_status & MAC_STATUS_PCS_SYNCED))
3848 /* Set PLL lock range. */
3849 tg3_writephy(tp, 0x16, 0x8007);
3852 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3854 /* Wait for reset to complete. */
3855 /* XXX schedule_timeout() ... */
3856 for (i = 0; i < 500; i++)
3859 /* Config mode; select PMA/Ch 1 regs. */
3860 tg3_writephy(tp, 0x10, 0x8411);
3862 /* Enable auto-lock and comdet, select txclk for tx. */
3863 tg3_writephy(tp, 0x11, 0x0a10);
3865 tg3_writephy(tp, 0x18, 0x00a0);
3866 tg3_writephy(tp, 0x16, 0x41ff);
3868 /* Assert and deassert POR. */
3869 tg3_writephy(tp, 0x13, 0x0400);
3871 tg3_writephy(tp, 0x13, 0x0000);
3873 tg3_writephy(tp, 0x11, 0x0a50);
3875 tg3_writephy(tp, 0x11, 0x0a10);
3877 /* Wait for signal to stabilize */
3878 /* XXX schedule_timeout() ... */
3879 for (i = 0; i < 15000; i++)
3882 /* Deselect the channel register so we can read the PHYID
3885 tg3_writephy(tp, 0x10, 0x8011);
3888 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3891 u32 sg_dig_ctrl, sg_dig_status;
3892 u32 serdes_cfg, expected_sg_dig_ctrl;
3893 int workaround, port_a;
3894 int current_link_up;
3897 expected_sg_dig_ctrl = 0;
3900 current_link_up = 0;
3902 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3903 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3905 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3908 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3909 /* preserve bits 20-23 for voltage regulator */
3910 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3913 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3915 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3916 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3918 u32 val = serdes_cfg;
3924 tw32_f(MAC_SERDES_CFG, val);
3927 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3929 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3930 tg3_setup_flow_control(tp, 0, 0);
3931 current_link_up = 1;
3936 /* Want auto-negotiation. */
3937 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3939 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3940 if (flowctrl & ADVERTISE_1000XPAUSE)
3941 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3942 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3943 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3945 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3946 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3947 tp->serdes_counter &&
3948 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3949 MAC_STATUS_RCVD_CFG)) ==
3950 MAC_STATUS_PCS_SYNCED)) {
3951 tp->serdes_counter--;
3952 current_link_up = 1;
3957 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3958 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3960 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3962 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3963 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3964 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3965 MAC_STATUS_SIGNAL_DET)) {
3966 sg_dig_status = tr32(SG_DIG_STATUS);
3967 mac_status = tr32(MAC_STATUS);
3969 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3970 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3971 u32 local_adv = 0, remote_adv = 0;
3973 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3974 local_adv |= ADVERTISE_1000XPAUSE;
3975 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3976 local_adv |= ADVERTISE_1000XPSE_ASYM;
3978 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3979 remote_adv |= LPA_1000XPAUSE;
3980 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3981 remote_adv |= LPA_1000XPAUSE_ASYM;
3983 tg3_setup_flow_control(tp, local_adv, remote_adv);
3984 current_link_up = 1;
3985 tp->serdes_counter = 0;
3986 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3987 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3988 if (tp->serdes_counter)
3989 tp->serdes_counter--;
3992 u32 val = serdes_cfg;
3999 tw32_f(MAC_SERDES_CFG, val);
4002 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4005 /* Link parallel detection - link is up */
4006 /* only if we have PCS_SYNC and not */
4007 /* receiving config code words */
4008 mac_status = tr32(MAC_STATUS);
4009 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4010 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4011 tg3_setup_flow_control(tp, 0, 0);
4012 current_link_up = 1;
4014 TG3_PHYFLG_PARALLEL_DETECT;
4015 tp->serdes_counter =
4016 SERDES_PARALLEL_DET_TIMEOUT;
4018 goto restart_autoneg;
4022 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4023 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4027 return current_link_up;
4030 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4032 int current_link_up = 0;
4034 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4037 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4038 u32 txflags, rxflags;
4041 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4042 u32 local_adv = 0, remote_adv = 0;
4044 if (txflags & ANEG_CFG_PS1)
4045 local_adv |= ADVERTISE_1000XPAUSE;
4046 if (txflags & ANEG_CFG_PS2)
4047 local_adv |= ADVERTISE_1000XPSE_ASYM;
4049 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4050 remote_adv |= LPA_1000XPAUSE;
4051 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4052 remote_adv |= LPA_1000XPAUSE_ASYM;
4054 tg3_setup_flow_control(tp, local_adv, remote_adv);
4056 current_link_up = 1;
4058 for (i = 0; i < 30; i++) {
4061 (MAC_STATUS_SYNC_CHANGED |
4062 MAC_STATUS_CFG_CHANGED));
4064 if ((tr32(MAC_STATUS) &
4065 (MAC_STATUS_SYNC_CHANGED |
4066 MAC_STATUS_CFG_CHANGED)) == 0)
4070 mac_status = tr32(MAC_STATUS);
4071 if (current_link_up == 0 &&
4072 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4073 !(mac_status & MAC_STATUS_RCVD_CFG))
4074 current_link_up = 1;
4076 tg3_setup_flow_control(tp, 0, 0);
4078 /* Forcing 1000FD link up. */
4079 current_link_up = 1;
4081 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4084 tw32_f(MAC_MODE, tp->mac_mode);
4089 return current_link_up;
4092 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4095 u16 orig_active_speed;
4096 u8 orig_active_duplex;
4098 int current_link_up;
4101 orig_pause_cfg = tp->link_config.active_flowctrl;
4102 orig_active_speed = tp->link_config.active_speed;
4103 orig_active_duplex = tp->link_config.active_duplex;
4105 if (!tg3_flag(tp, HW_AUTONEG) &&
4106 netif_carrier_ok(tp->dev) &&
4107 tg3_flag(tp, INIT_COMPLETE)) {
4108 mac_status = tr32(MAC_STATUS);
4109 mac_status &= (MAC_STATUS_PCS_SYNCED |
4110 MAC_STATUS_SIGNAL_DET |
4111 MAC_STATUS_CFG_CHANGED |
4112 MAC_STATUS_RCVD_CFG);
4113 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4114 MAC_STATUS_SIGNAL_DET)) {
4115 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4116 MAC_STATUS_CFG_CHANGED));
4121 tw32_f(MAC_TX_AUTO_NEG, 0);
4123 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4124 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4125 tw32_f(MAC_MODE, tp->mac_mode);
4128 if (tp->phy_id == TG3_PHY_ID_BCM8002)
4129 tg3_init_bcm8002(tp);
4131 /* Enable link change event even when serdes polling. */
4132 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4135 current_link_up = 0;
4136 mac_status = tr32(MAC_STATUS);
4138 if (tg3_flag(tp, HW_AUTONEG))
4139 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4141 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4143 tp->napi[0].hw_status->status =
4144 (SD_STATUS_UPDATED |
4145 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4147 for (i = 0; i < 100; i++) {
4148 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4149 MAC_STATUS_CFG_CHANGED));
4151 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4152 MAC_STATUS_CFG_CHANGED |
4153 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4157 mac_status = tr32(MAC_STATUS);
4158 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4159 current_link_up = 0;
4160 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4161 tp->serdes_counter == 0) {
4162 tw32_f(MAC_MODE, (tp->mac_mode |
4163 MAC_MODE_SEND_CONFIGS));
4165 tw32_f(MAC_MODE, tp->mac_mode);
4169 if (current_link_up == 1) {
4170 tp->link_config.active_speed = SPEED_1000;
4171 tp->link_config.active_duplex = DUPLEX_FULL;
4172 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4173 LED_CTRL_LNKLED_OVERRIDE |
4174 LED_CTRL_1000MBPS_ON));
4176 tp->link_config.active_speed = SPEED_INVALID;
4177 tp->link_config.active_duplex = DUPLEX_INVALID;
4178 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4179 LED_CTRL_LNKLED_OVERRIDE |
4180 LED_CTRL_TRAFFIC_OVERRIDE));
4183 if (current_link_up != netif_carrier_ok(tp->dev)) {
4184 if (current_link_up)
4185 netif_carrier_on(tp->dev);
4187 netif_carrier_off(tp->dev);
4188 tg3_link_report(tp);
4190 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4191 if (orig_pause_cfg != now_pause_cfg ||
4192 orig_active_speed != tp->link_config.active_speed ||
4193 orig_active_duplex != tp->link_config.active_duplex)
4194 tg3_link_report(tp);
4200 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4202 int current_link_up, err = 0;
4206 u32 local_adv, remote_adv;
4208 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4209 tw32_f(MAC_MODE, tp->mac_mode);
4215 (MAC_STATUS_SYNC_CHANGED |
4216 MAC_STATUS_CFG_CHANGED |
4217 MAC_STATUS_MI_COMPLETION |
4218 MAC_STATUS_LNKSTATE_CHANGED));
4224 current_link_up = 0;
4225 current_speed = SPEED_INVALID;
4226 current_duplex = DUPLEX_INVALID;
4228 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4229 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4230 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4231 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4232 bmsr |= BMSR_LSTATUS;
4234 bmsr &= ~BMSR_LSTATUS;
4237 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4239 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4240 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4241 /* do nothing, just check for link up at the end */
4242 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4245 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4246 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4247 ADVERTISE_1000XPAUSE |
4248 ADVERTISE_1000XPSE_ASYM |
4251 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4253 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4254 new_adv |= ADVERTISE_1000XHALF;
4255 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4256 new_adv |= ADVERTISE_1000XFULL;
4258 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4259 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4260 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4261 tg3_writephy(tp, MII_BMCR, bmcr);
4263 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4264 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4265 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4272 bmcr &= ~BMCR_SPEED1000;
4273 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4275 if (tp->link_config.duplex == DUPLEX_FULL)
4276 new_bmcr |= BMCR_FULLDPLX;
4278 if (new_bmcr != bmcr) {
4279 /* BMCR_SPEED1000 is a reserved bit that needs
4280 * to be set on write.
4282 new_bmcr |= BMCR_SPEED1000;
4284 /* Force a linkdown */
4285 if (netif_carrier_ok(tp->dev)) {
4288 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4289 adv &= ~(ADVERTISE_1000XFULL |
4290 ADVERTISE_1000XHALF |
4292 tg3_writephy(tp, MII_ADVERTISE, adv);
4293 tg3_writephy(tp, MII_BMCR, bmcr |
4297 netif_carrier_off(tp->dev);
4299 tg3_writephy(tp, MII_BMCR, new_bmcr);
4301 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4302 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4303 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4305 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4306 bmsr |= BMSR_LSTATUS;
4308 bmsr &= ~BMSR_LSTATUS;
4310 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4314 if (bmsr & BMSR_LSTATUS) {
4315 current_speed = SPEED_1000;
4316 current_link_up = 1;
4317 if (bmcr & BMCR_FULLDPLX)
4318 current_duplex = DUPLEX_FULL;
4320 current_duplex = DUPLEX_HALF;
4325 if (bmcr & BMCR_ANENABLE) {
4328 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4329 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4330 common = local_adv & remote_adv;
4331 if (common & (ADVERTISE_1000XHALF |
4332 ADVERTISE_1000XFULL)) {
4333 if (common & ADVERTISE_1000XFULL)
4334 current_duplex = DUPLEX_FULL;
4336 current_duplex = DUPLEX_HALF;
4337 } else if (!tg3_flag(tp, 5780_CLASS)) {
4338 /* Link is up via parallel detect */
4340 current_link_up = 0;
4345 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4346 tg3_setup_flow_control(tp, local_adv, remote_adv);
4348 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4349 if (tp->link_config.active_duplex == DUPLEX_HALF)
4350 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4352 tw32_f(MAC_MODE, tp->mac_mode);
4355 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4357 tp->link_config.active_speed = current_speed;
4358 tp->link_config.active_duplex = current_duplex;
4360 if (current_link_up != netif_carrier_ok(tp->dev)) {
4361 if (current_link_up)
4362 netif_carrier_on(tp->dev);
4364 netif_carrier_off(tp->dev);
4365 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4367 tg3_link_report(tp);
4372 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4374 if (tp->serdes_counter) {
4375 /* Give autoneg time to complete. */
4376 tp->serdes_counter--;
4380 if (!netif_carrier_ok(tp->dev) &&
4381 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4384 tg3_readphy(tp, MII_BMCR, &bmcr);
4385 if (bmcr & BMCR_ANENABLE) {
4388 /* Select shadow register 0x1f */
4389 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4390 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4392 /* Select expansion interrupt status register */
4393 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4394 MII_TG3_DSP_EXP1_INT_STAT);
4395 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4396 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4398 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4399 /* We have signal detect and not receiving
4400 * config code words, link is up by parallel
4404 bmcr &= ~BMCR_ANENABLE;
4405 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4406 tg3_writephy(tp, MII_BMCR, bmcr);
4407 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4410 } else if (netif_carrier_ok(tp->dev) &&
4411 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4412 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4415 /* Select expansion interrupt status register */
4416 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4417 MII_TG3_DSP_EXP1_INT_STAT);
4418 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4422 /* Config code words received, turn on autoneg. */
4423 tg3_readphy(tp, MII_BMCR, &bmcr);
4424 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4426 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4432 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4437 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4438 err = tg3_setup_fiber_phy(tp, force_reset);
4439 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4440 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4442 err = tg3_setup_copper_phy(tp, force_reset);
4444 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4447 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4448 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4450 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4455 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4456 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4457 tw32(GRC_MISC_CFG, val);
4460 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4461 (6 << TX_LENGTHS_IPG_SHIFT);
4462 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4463 val |= tr32(MAC_TX_LENGTHS) &
4464 (TX_LENGTHS_JMB_FRM_LEN_MSK |
4465 TX_LENGTHS_CNT_DWN_VAL_MSK);
4467 if (tp->link_config.active_speed == SPEED_1000 &&
4468 tp->link_config.active_duplex == DUPLEX_HALF)
4469 tw32(MAC_TX_LENGTHS, val |
4470 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4472 tw32(MAC_TX_LENGTHS, val |
4473 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4475 if (!tg3_flag(tp, 5705_PLUS)) {
4476 if (netif_carrier_ok(tp->dev)) {
4477 tw32(HOSTCC_STAT_COAL_TICKS,
4478 tp->coal.stats_block_coalesce_usecs);
4480 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4484 if (tg3_flag(tp, ASPM_WORKAROUND)) {
4485 val = tr32(PCIE_PWR_MGMT_THRESH);
4486 if (!netif_carrier_ok(tp->dev))
4487 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4490 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4491 tw32(PCIE_PWR_MGMT_THRESH, val);
4497 static inline int tg3_irq_sync(struct tg3 *tp)
4499 return tp->irq_sync;
4502 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4506 dst = (u32 *)((u8 *)dst + off);
4507 for (i = 0; i < len; i += sizeof(u32))
4508 *dst++ = tr32(off + i);
4511 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4513 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4514 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4515 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4516 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4517 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4518 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4519 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4520 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4521 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4522 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4523 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4524 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4525 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4526 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4527 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4528 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4529 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4530 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4531 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4533 if (tg3_flag(tp, SUPPORT_MSIX))
4534 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4536 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4537 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4538 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4539 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4540 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4541 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4542 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4543 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4545 if (!tg3_flag(tp, 5705_PLUS)) {
4546 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4547 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4548 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4551 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4552 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4553 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4554 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4555 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4557 if (tg3_flag(tp, NVRAM))
4558 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4561 static void tg3_dump_state(struct tg3 *tp)
4566 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4568 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4572 if (tg3_flag(tp, PCI_EXPRESS)) {
4573 /* Read up to but not including private PCI registers */
4574 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4575 regs[i / sizeof(u32)] = tr32(i);
4577 tg3_dump_legacy_regs(tp, regs);
4579 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4580 if (!regs[i + 0] && !regs[i + 1] &&
4581 !regs[i + 2] && !regs[i + 3])
4584 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4586 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4591 for (i = 0; i < tp->irq_cnt; i++) {
4592 struct tg3_napi *tnapi = &tp->napi[i];
4594 /* SW status block */
4596 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4598 tnapi->hw_status->status,
4599 tnapi->hw_status->status_tag,
4600 tnapi->hw_status->rx_jumbo_consumer,
4601 tnapi->hw_status->rx_consumer,
4602 tnapi->hw_status->rx_mini_consumer,
4603 tnapi->hw_status->idx[0].rx_producer,
4604 tnapi->hw_status->idx[0].tx_consumer);
4607 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4609 tnapi->last_tag, tnapi->last_irq_tag,
4610 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4612 tnapi->prodring.rx_std_prod_idx,
4613 tnapi->prodring.rx_std_cons_idx,
4614 tnapi->prodring.rx_jmb_prod_idx,
4615 tnapi->prodring.rx_jmb_cons_idx);
4619 /* This is called whenever we suspect that the system chipset is re-
4620 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4621 * is bogus tx completions. We try to recover by setting the
4622 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4625 static void tg3_tx_recover(struct tg3 *tp)
4627 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4628 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4630 netdev_warn(tp->dev,
4631 "The system may be re-ordering memory-mapped I/O "
4632 "cycles to the network device, attempting to recover. "
4633 "Please report the problem to the driver maintainer "
4634 "and include system chipset information.\n");
4636 spin_lock(&tp->lock);
4637 tg3_flag_set(tp, TX_RECOVERY_PENDING);
4638 spin_unlock(&tp->lock);
4641 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4643 /* Tell compiler to fetch tx indices from memory. */
4645 return tnapi->tx_pending -
4646 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4649 /* Tigon3 never reports partial packet sends. So we do not
4650 * need special logic to handle SKBs that have not had all
4651 * of their frags sent yet, like SunGEM does.
4653 static void tg3_tx(struct tg3_napi *tnapi)
4655 struct tg3 *tp = tnapi->tp;
4656 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4657 u32 sw_idx = tnapi->tx_cons;
4658 struct netdev_queue *txq;
4659 int index = tnapi - tp->napi;
4661 if (tg3_flag(tp, ENABLE_TSS))
4664 txq = netdev_get_tx_queue(tp->dev, index);
4666 while (sw_idx != hw_idx) {
4667 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4668 struct sk_buff *skb = ri->skb;
4671 if (unlikely(skb == NULL)) {
4676 pci_unmap_single(tp->pdev,
4677 dma_unmap_addr(ri, mapping),
4683 sw_idx = NEXT_TX(sw_idx);
4685 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4686 ri = &tnapi->tx_buffers[sw_idx];
4687 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4690 pci_unmap_page(tp->pdev,
4691 dma_unmap_addr(ri, mapping),
4692 skb_shinfo(skb)->frags[i].size,
4694 sw_idx = NEXT_TX(sw_idx);
4699 if (unlikely(tx_bug)) {
4705 tnapi->tx_cons = sw_idx;
4707 /* Need to make the tx_cons update visible to tg3_start_xmit()
4708 * before checking for netif_queue_stopped(). Without the
4709 * memory barrier, there is a small possibility that tg3_start_xmit()
4710 * will miss it and cause the queue to be stopped forever.
4714 if (unlikely(netif_tx_queue_stopped(txq) &&
4715 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4716 __netif_tx_lock(txq, smp_processor_id());
4717 if (netif_tx_queue_stopped(txq) &&
4718 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4719 netif_tx_wake_queue(txq);
4720 __netif_tx_unlock(txq);
4724 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4729 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4730 map_sz, PCI_DMA_FROMDEVICE);
4731 dev_kfree_skb_any(ri->skb);
4735 /* Returns size of skb allocated or < 0 on error.
4737 * We only need to fill in the address because the other members
4738 * of the RX descriptor are invariant, see tg3_init_rings.
4740 * Note the purposeful assymetry of cpu vs. chip accesses. For
4741 * posting buffers we only dirty the first cache line of the RX
4742 * descriptor (containing the address). Whereas for the RX status
4743 * buffers the cpu only reads the last cacheline of the RX descriptor
4744 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4746 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4747 u32 opaque_key, u32 dest_idx_unmasked)
4749 struct tg3_rx_buffer_desc *desc;
4750 struct ring_info *map;
4751 struct sk_buff *skb;
4753 int skb_size, dest_idx;
4755 switch (opaque_key) {
4756 case RXD_OPAQUE_RING_STD:
4757 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4758 desc = &tpr->rx_std[dest_idx];
4759 map = &tpr->rx_std_buffers[dest_idx];
4760 skb_size = tp->rx_pkt_map_sz;
4763 case RXD_OPAQUE_RING_JUMBO:
4764 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4765 desc = &tpr->rx_jmb[dest_idx].std;
4766 map = &tpr->rx_jmb_buffers[dest_idx];
4767 skb_size = TG3_RX_JMB_MAP_SZ;
4774 /* Do not overwrite any of the map or rp information
4775 * until we are sure we can commit to a new buffer.
4777 * Callers depend upon this behavior and assume that
4778 * we leave everything unchanged if we fail.
4780 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4784 skb_reserve(skb, tp->rx_offset);
4786 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4787 PCI_DMA_FROMDEVICE);
4788 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4794 dma_unmap_addr_set(map, mapping, mapping);
4796 desc->addr_hi = ((u64)mapping >> 32);
4797 desc->addr_lo = ((u64)mapping & 0xffffffff);
4802 /* We only need to move over in the address because the other
4803 * members of the RX descriptor are invariant. See notes above
4804 * tg3_alloc_rx_skb for full details.
4806 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4807 struct tg3_rx_prodring_set *dpr,
4808 u32 opaque_key, int src_idx,
4809 u32 dest_idx_unmasked)
4811 struct tg3 *tp = tnapi->tp;
4812 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4813 struct ring_info *src_map, *dest_map;
4814 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4817 switch (opaque_key) {
4818 case RXD_OPAQUE_RING_STD:
4819 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4820 dest_desc = &dpr->rx_std[dest_idx];
4821 dest_map = &dpr->rx_std_buffers[dest_idx];
4822 src_desc = &spr->rx_std[src_idx];
4823 src_map = &spr->rx_std_buffers[src_idx];
4826 case RXD_OPAQUE_RING_JUMBO:
4827 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4828 dest_desc = &dpr->rx_jmb[dest_idx].std;
4829 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4830 src_desc = &spr->rx_jmb[src_idx].std;
4831 src_map = &spr->rx_jmb_buffers[src_idx];
4838 dest_map->skb = src_map->skb;
4839 dma_unmap_addr_set(dest_map, mapping,
4840 dma_unmap_addr(src_map, mapping));
4841 dest_desc->addr_hi = src_desc->addr_hi;
4842 dest_desc->addr_lo = src_desc->addr_lo;
4844 /* Ensure that the update to the skb happens after the physical
4845 * addresses have been transferred to the new BD location.
4849 src_map->skb = NULL;
4852 /* The RX ring scheme is composed of multiple rings which post fresh
4853 * buffers to the chip, and one special ring the chip uses to report
4854 * status back to the host.
4856 * The special ring reports the status of received packets to the
4857 * host. The chip does not write into the original descriptor the
4858 * RX buffer was obtained from. The chip simply takes the original
4859 * descriptor as provided by the host, updates the status and length
4860 * field, then writes this into the next status ring entry.
4862 * Each ring the host uses to post buffers to the chip is described
4863 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
4864 * it is first placed into the on-chip ram. When the packet's length
4865 * is known, it walks down the TG3_BDINFO entries to select the ring.
4866 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4867 * which is within the range of the new packet's length is chosen.
4869 * The "separate ring for rx status" scheme may sound queer, but it makes
4870 * sense from a cache coherency perspective. If only the host writes
4871 * to the buffer post rings, and only the chip writes to the rx status
4872 * rings, then cache lines never move beyond shared-modified state.
4873 * If both the host and chip were to write into the same ring, cache line
4874 * eviction could occur since both entities want it in an exclusive state.
4876 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4878 struct tg3 *tp = tnapi->tp;
4879 u32 work_mask, rx_std_posted = 0;
4880 u32 std_prod_idx, jmb_prod_idx;
4881 u32 sw_idx = tnapi->rx_rcb_ptr;
4884 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4886 hw_idx = *(tnapi->rx_rcb_prod_idx);
4888 * We need to order the read of hw_idx and the read of
4889 * the opaque cookie.
4894 std_prod_idx = tpr->rx_std_prod_idx;
4895 jmb_prod_idx = tpr->rx_jmb_prod_idx;
4896 while (sw_idx != hw_idx && budget > 0) {
4897 struct ring_info *ri;
4898 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4900 struct sk_buff *skb;
4901 dma_addr_t dma_addr;
4902 u32 opaque_key, desc_idx, *post_ptr;
4904 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4905 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4906 if (opaque_key == RXD_OPAQUE_RING_STD) {
4907 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4908 dma_addr = dma_unmap_addr(ri, mapping);
4910 post_ptr = &std_prod_idx;
4912 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4913 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4914 dma_addr = dma_unmap_addr(ri, mapping);
4916 post_ptr = &jmb_prod_idx;
4918 goto next_pkt_nopost;
4920 work_mask |= opaque_key;
4922 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4923 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4925 tg3_recycle_rx(tnapi, tpr, opaque_key,
4926 desc_idx, *post_ptr);
4928 /* Other statistics kept track of by card. */
4933 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4936 if (len > TG3_RX_COPY_THRESH(tp)) {
4939 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4944 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4945 PCI_DMA_FROMDEVICE);
4947 /* Ensure that the update to the skb happens
4948 * after the usage of the old DMA mapping.
4956 struct sk_buff *copy_skb;
4958 tg3_recycle_rx(tnapi, tpr, opaque_key,
4959 desc_idx, *post_ptr);
4961 copy_skb = netdev_alloc_skb(tp->dev, len +
4963 if (copy_skb == NULL)
4964 goto drop_it_no_recycle;
4966 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4967 skb_put(copy_skb, len);
4968 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4969 skb_copy_from_linear_data(skb, copy_skb->data, len);
4970 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4972 /* We'll reuse the original ring buffer. */
4976 if ((tp->dev->features & NETIF_F_RXCSUM) &&
4977 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4978 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4979 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4980 skb->ip_summed = CHECKSUM_UNNECESSARY;
4982 skb_checksum_none_assert(skb);
4984 skb->protocol = eth_type_trans(skb, tp->dev);
4986 if (len > (tp->dev->mtu + ETH_HLEN) &&
4987 skb->protocol != htons(ETH_P_8021Q)) {
4989 goto drop_it_no_recycle;
4992 if (desc->type_flags & RXD_FLAG_VLAN &&
4993 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
4994 __vlan_hwaccel_put_tag(skb,
4995 desc->err_vlan & RXD_VLAN_MASK);
4997 napi_gro_receive(&tnapi->napi, skb);
5005 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5006 tpr->rx_std_prod_idx = std_prod_idx &
5007 tp->rx_std_ring_mask;
5008 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5009 tpr->rx_std_prod_idx);
5010 work_mask &= ~RXD_OPAQUE_RING_STD;
5015 sw_idx &= tp->rx_ret_ring_mask;
5017 /* Refresh hw_idx to see if there is new work */
5018 if (sw_idx == hw_idx) {
5019 hw_idx = *(tnapi->rx_rcb_prod_idx);
5024 /* ACK the status ring. */
5025 tnapi->rx_rcb_ptr = sw_idx;
5026 tw32_rx_mbox(tnapi->consmbox, sw_idx);
5028 /* Refill RX ring(s). */
5029 if (!tg3_flag(tp, ENABLE_RSS)) {
5030 if (work_mask & RXD_OPAQUE_RING_STD) {
5031 tpr->rx_std_prod_idx = std_prod_idx &
5032 tp->rx_std_ring_mask;
5033 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5034 tpr->rx_std_prod_idx);
5036 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5037 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5038 tp->rx_jmb_ring_mask;
5039 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5040 tpr->rx_jmb_prod_idx);
5043 } else if (work_mask) {
5044 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5045 * updated before the producer indices can be updated.
5049 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5050 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5052 if (tnapi != &tp->napi[1])
5053 napi_schedule(&tp->napi[1].napi);
5059 static void tg3_poll_link(struct tg3 *tp)
5061 /* handle link change and other phy events */
5062 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5063 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5065 if (sblk->status & SD_STATUS_LINK_CHG) {
5066 sblk->status = SD_STATUS_UPDATED |
5067 (sblk->status & ~SD_STATUS_LINK_CHG);
5068 spin_lock(&tp->lock);
5069 if (tg3_flag(tp, USE_PHYLIB)) {
5071 (MAC_STATUS_SYNC_CHANGED |
5072 MAC_STATUS_CFG_CHANGED |
5073 MAC_STATUS_MI_COMPLETION |
5074 MAC_STATUS_LNKSTATE_CHANGED));
5077 tg3_setup_phy(tp, 0);
5078 spin_unlock(&tp->lock);
5083 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5084 struct tg3_rx_prodring_set *dpr,
5085 struct tg3_rx_prodring_set *spr)
5087 u32 si, di, cpycnt, src_prod_idx;
5091 src_prod_idx = spr->rx_std_prod_idx;
5093 /* Make sure updates to the rx_std_buffers[] entries and the
5094 * standard producer index are seen in the correct order.
5098 if (spr->rx_std_cons_idx == src_prod_idx)
5101 if (spr->rx_std_cons_idx < src_prod_idx)
5102 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5104 cpycnt = tp->rx_std_ring_mask + 1 -
5105 spr->rx_std_cons_idx;
5107 cpycnt = min(cpycnt,
5108 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5110 si = spr->rx_std_cons_idx;
5111 di = dpr->rx_std_prod_idx;
5113 for (i = di; i < di + cpycnt; i++) {
5114 if (dpr->rx_std_buffers[i].skb) {
5124 /* Ensure that updates to the rx_std_buffers ring and the
5125 * shadowed hardware producer ring from tg3_recycle_skb() are
5126 * ordered correctly WRT the skb check above.
5130 memcpy(&dpr->rx_std_buffers[di],
5131 &spr->rx_std_buffers[si],
5132 cpycnt * sizeof(struct ring_info));
5134 for (i = 0; i < cpycnt; i++, di++, si++) {
5135 struct tg3_rx_buffer_desc *sbd, *dbd;
5136 sbd = &spr->rx_std[si];
5137 dbd = &dpr->rx_std[di];
5138 dbd->addr_hi = sbd->addr_hi;
5139 dbd->addr_lo = sbd->addr_lo;
5142 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5143 tp->rx_std_ring_mask;
5144 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5145 tp->rx_std_ring_mask;
5149 src_prod_idx = spr->rx_jmb_prod_idx;
5151 /* Make sure updates to the rx_jmb_buffers[] entries and
5152 * the jumbo producer index are seen in the correct order.
5156 if (spr->rx_jmb_cons_idx == src_prod_idx)
5159 if (spr->rx_jmb_cons_idx < src_prod_idx)
5160 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5162 cpycnt = tp->rx_jmb_ring_mask + 1 -
5163 spr->rx_jmb_cons_idx;
5165 cpycnt = min(cpycnt,
5166 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5168 si = spr->rx_jmb_cons_idx;
5169 di = dpr->rx_jmb_prod_idx;
5171 for (i = di; i < di + cpycnt; i++) {
5172 if (dpr->rx_jmb_buffers[i].skb) {
5182 /* Ensure that updates to the rx_jmb_buffers ring and the
5183 * shadowed hardware producer ring from tg3_recycle_skb() are
5184 * ordered correctly WRT the skb check above.
5188 memcpy(&dpr->rx_jmb_buffers[di],
5189 &spr->rx_jmb_buffers[si],
5190 cpycnt * sizeof(struct ring_info));
5192 for (i = 0; i < cpycnt; i++, di++, si++) {
5193 struct tg3_rx_buffer_desc *sbd, *dbd;
5194 sbd = &spr->rx_jmb[si].std;
5195 dbd = &dpr->rx_jmb[di].std;
5196 dbd->addr_hi = sbd->addr_hi;
5197 dbd->addr_lo = sbd->addr_lo;
5200 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5201 tp->rx_jmb_ring_mask;
5202 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5203 tp->rx_jmb_ring_mask;
5209 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5211 struct tg3 *tp = tnapi->tp;
5213 /* run TX completion thread */
5214 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5216 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5220 /* run RX thread, within the bounds set by NAPI.
5221 * All RX "locking" is done by ensuring outside
5222 * code synchronizes with tg3->napi.poll()
5224 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5225 work_done += tg3_rx(tnapi, budget - work_done);
5227 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5228 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5230 u32 std_prod_idx = dpr->rx_std_prod_idx;
5231 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5233 for (i = 1; i < tp->irq_cnt; i++)
5234 err |= tg3_rx_prodring_xfer(tp, dpr,
5235 &tp->napi[i].prodring);
5239 if (std_prod_idx != dpr->rx_std_prod_idx)
5240 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5241 dpr->rx_std_prod_idx);
5243 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5244 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5245 dpr->rx_jmb_prod_idx);
5250 tw32_f(HOSTCC_MODE, tp->coal_now);
5256 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5258 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5259 struct tg3 *tp = tnapi->tp;
5261 struct tg3_hw_status *sblk = tnapi->hw_status;
5264 work_done = tg3_poll_work(tnapi, work_done, budget);
5266 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5269 if (unlikely(work_done >= budget))
5272 /* tp->last_tag is used in tg3_int_reenable() below
5273 * to tell the hw how much work has been processed,
5274 * so we must read it before checking for more work.
5276 tnapi->last_tag = sblk->status_tag;
5277 tnapi->last_irq_tag = tnapi->last_tag;
5280 /* check for RX/TX work to do */
5281 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5282 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5283 napi_complete(napi);
5284 /* Reenable interrupts. */
5285 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5294 /* work_done is guaranteed to be less than budget. */
5295 napi_complete(napi);
5296 schedule_work(&tp->reset_task);
5300 static void tg3_process_error(struct tg3 *tp)
5303 bool real_error = false;
5305 if (tg3_flag(tp, ERROR_PROCESSED))
5308 /* Check Flow Attention register */
5309 val = tr32(HOSTCC_FLOW_ATTN);
5310 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5311 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
5315 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5316 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
5320 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5321 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
5330 tg3_flag_set(tp, ERROR_PROCESSED);
5331 schedule_work(&tp->reset_task);
5334 static int tg3_poll(struct napi_struct *napi, int budget)
5336 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5337 struct tg3 *tp = tnapi->tp;
5339 struct tg3_hw_status *sblk = tnapi->hw_status;
5342 if (sblk->status & SD_STATUS_ERROR)
5343 tg3_process_error(tp);
5347 work_done = tg3_poll_work(tnapi, work_done, budget);
5349 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5352 if (unlikely(work_done >= budget))
5355 if (tg3_flag(tp, TAGGED_STATUS)) {
5356 /* tp->last_tag is used in tg3_int_reenable() below
5357 * to tell the hw how much work has been processed,
5358 * so we must read it before checking for more work.
5360 tnapi->last_tag = sblk->status_tag;
5361 tnapi->last_irq_tag = tnapi->last_tag;
5364 sblk->status &= ~SD_STATUS_UPDATED;
5366 if (likely(!tg3_has_work(tnapi))) {
5367 napi_complete(napi);
5368 tg3_int_reenable(tnapi);
5376 /* work_done is guaranteed to be less than budget. */
5377 napi_complete(napi);
5378 schedule_work(&tp->reset_task);
5382 static void tg3_napi_disable(struct tg3 *tp)
5386 for (i = tp->irq_cnt - 1; i >= 0; i--)
5387 napi_disable(&tp->napi[i].napi);
5390 static void tg3_napi_enable(struct tg3 *tp)
5394 for (i = 0; i < tp->irq_cnt; i++)
5395 napi_enable(&tp->napi[i].napi);
5398 static void tg3_napi_init(struct tg3 *tp)
5402 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5403 for (i = 1; i < tp->irq_cnt; i++)
5404 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5407 static void tg3_napi_fini(struct tg3 *tp)
5411 for (i = 0; i < tp->irq_cnt; i++)
5412 netif_napi_del(&tp->napi[i].napi);
5415 static inline void tg3_netif_stop(struct tg3 *tp)
5417 tp->dev->trans_start = jiffies; /* prevent tx timeout */
5418 tg3_napi_disable(tp);
5419 netif_tx_disable(tp->dev);
5422 static inline void tg3_netif_start(struct tg3 *tp)
5424 /* NOTE: unconditional netif_tx_wake_all_queues is only
5425 * appropriate so long as all callers are assured to
5426 * have free tx slots (such as after tg3_init_hw)
5428 netif_tx_wake_all_queues(tp->dev);
5430 tg3_napi_enable(tp);
5431 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5432 tg3_enable_ints(tp);
5435 static void tg3_irq_quiesce(struct tg3 *tp)
5439 BUG_ON(tp->irq_sync);
5444 for (i = 0; i < tp->irq_cnt; i++)
5445 synchronize_irq(tp->napi[i].irq_vec);
5448 /* Fully shutdown all tg3 driver activity elsewhere in the system.
5449 * If irq_sync is non-zero, then the IRQ handler must be synchronized
5450 * with as well. Most of the time, this is not necessary except when
5451 * shutting down the device.
5453 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5455 spin_lock_bh(&tp->lock);
5457 tg3_irq_quiesce(tp);
5460 static inline void tg3_full_unlock(struct tg3 *tp)
5462 spin_unlock_bh(&tp->lock);
5465 /* One-shot MSI handler - Chip automatically disables interrupt
5466 * after sending MSI so driver doesn't have to do it.
5468 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5470 struct tg3_napi *tnapi = dev_id;
5471 struct tg3 *tp = tnapi->tp;
5473 prefetch(tnapi->hw_status);
5475 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5477 if (likely(!tg3_irq_sync(tp)))
5478 napi_schedule(&tnapi->napi);
5483 /* MSI ISR - No need to check for interrupt sharing and no need to
5484 * flush status block and interrupt mailbox. PCI ordering rules
5485 * guarantee that MSI will arrive after the status block.
5487 static irqreturn_t tg3_msi(int irq, void *dev_id)
5489 struct tg3_napi *tnapi = dev_id;
5490 struct tg3 *tp = tnapi->tp;
5492 prefetch(tnapi->hw_status);
5494 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5496 * Writing any value to intr-mbox-0 clears PCI INTA# and
5497 * chip-internal interrupt pending events.
5498 * Writing non-zero to intr-mbox-0 additional tells the
5499 * NIC to stop sending us irqs, engaging "in-intr-handler"
5502 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5503 if (likely(!tg3_irq_sync(tp)))
5504 napi_schedule(&tnapi->napi);
5506 return IRQ_RETVAL(1);
5509 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5511 struct tg3_napi *tnapi = dev_id;
5512 struct tg3 *tp = tnapi->tp;
5513 struct tg3_hw_status *sblk = tnapi->hw_status;
5514 unsigned int handled = 1;
5516 /* In INTx mode, it is possible for the interrupt to arrive at
5517 * the CPU before the status block posted prior to the interrupt.
5518 * Reading the PCI State register will confirm whether the
5519 * interrupt is ours and will flush the status block.
5521 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5522 if (tg3_flag(tp, CHIP_RESETTING) ||
5523 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5530 * Writing any value to intr-mbox-0 clears PCI INTA# and
5531 * chip-internal interrupt pending events.
5532 * Writing non-zero to intr-mbox-0 additional tells the
5533 * NIC to stop sending us irqs, engaging "in-intr-handler"
5536 * Flush the mailbox to de-assert the IRQ immediately to prevent
5537 * spurious interrupts. The flush impacts performance but
5538 * excessive spurious interrupts can be worse in some cases.
5540 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5541 if (tg3_irq_sync(tp))
5543 sblk->status &= ~SD_STATUS_UPDATED;
5544 if (likely(tg3_has_work(tnapi))) {
5545 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5546 napi_schedule(&tnapi->napi);
5548 /* No work, shared interrupt perhaps? re-enable
5549 * interrupts, and flush that PCI write
5551 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5555 return IRQ_RETVAL(handled);
5558 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5560 struct tg3_napi *tnapi = dev_id;
5561 struct tg3 *tp = tnapi->tp;
5562 struct tg3_hw_status *sblk = tnapi->hw_status;
5563 unsigned int handled = 1;
5565 /* In INTx mode, it is possible for the interrupt to arrive at
5566 * the CPU before the status block posted prior to the interrupt.
5567 * Reading the PCI State register will confirm whether the
5568 * interrupt is ours and will flush the status block.
5570 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5571 if (tg3_flag(tp, CHIP_RESETTING) ||
5572 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5579 * writing any value to intr-mbox-0 clears PCI INTA# and
5580 * chip-internal interrupt pending events.
5581 * writing non-zero to intr-mbox-0 additional tells the
5582 * NIC to stop sending us irqs, engaging "in-intr-handler"
5585 * Flush the mailbox to de-assert the IRQ immediately to prevent
5586 * spurious interrupts. The flush impacts performance but
5587 * excessive spurious interrupts can be worse in some cases.
5589 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5592 * In a shared interrupt configuration, sometimes other devices'
5593 * interrupts will scream. We record the current status tag here
5594 * so that the above check can report that the screaming interrupts
5595 * are unhandled. Eventually they will be silenced.
5597 tnapi->last_irq_tag = sblk->status_tag;
5599 if (tg3_irq_sync(tp))
5602 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5604 napi_schedule(&tnapi->napi);
5607 return IRQ_RETVAL(handled);
5610 /* ISR for interrupt test */
5611 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5613 struct tg3_napi *tnapi = dev_id;
5614 struct tg3 *tp = tnapi->tp;
5615 struct tg3_hw_status *sblk = tnapi->hw_status;
5617 if ((sblk->status & SD_STATUS_UPDATED) ||
5618 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5619 tg3_disable_ints(tp);
5620 return IRQ_RETVAL(1);
5622 return IRQ_RETVAL(0);
5625 static int tg3_init_hw(struct tg3 *, int);
5626 static int tg3_halt(struct tg3 *, int, int);
5628 /* Restart hardware after configuration changes, self-test, etc.
5629 * Invoked with tp->lock held.
5631 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5632 __releases(tp->lock)
5633 __acquires(tp->lock)
5637 err = tg3_init_hw(tp, reset_phy);
5640 "Failed to re-initialize device, aborting\n");
5641 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5642 tg3_full_unlock(tp);
5643 del_timer_sync(&tp->timer);
5645 tg3_napi_enable(tp);
5647 tg3_full_lock(tp, 0);
5652 #ifdef CONFIG_NET_POLL_CONTROLLER
5653 static void tg3_poll_controller(struct net_device *dev)
5656 struct tg3 *tp = netdev_priv(dev);
5658 for (i = 0; i < tp->irq_cnt; i++)
5659 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5663 static void tg3_reset_task(struct work_struct *work)
5665 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5667 unsigned int restart_timer;
5669 tg3_full_lock(tp, 0);
5671 if (!netif_running(tp->dev)) {
5672 tg3_full_unlock(tp);
5676 tg3_full_unlock(tp);
5682 tg3_full_lock(tp, 1);
5684 restart_timer = tg3_flag(tp, RESTART_TIMER);
5685 tg3_flag_clear(tp, RESTART_TIMER);
5687 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5688 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5689 tp->write32_rx_mbox = tg3_write_flush_reg32;
5690 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5691 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5694 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5695 err = tg3_init_hw(tp, 1);
5699 tg3_netif_start(tp);
5702 mod_timer(&tp->timer, jiffies + 1);
5705 tg3_full_unlock(tp);
5711 static void tg3_tx_timeout(struct net_device *dev)
5713 struct tg3 *tp = netdev_priv(dev);
5715 if (netif_msg_tx_err(tp)) {
5716 netdev_err(dev, "transmit timed out, resetting\n");
5720 schedule_work(&tp->reset_task);
5723 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5724 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5726 u32 base = (u32) mapping & 0xffffffff;
5728 return (base > 0xffffdcc0) && (base + len + 8 < base);
5731 /* Test for DMA addresses > 40-bit */
5732 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5735 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5736 if (tg3_flag(tp, 40BIT_DMA_BUG))
5737 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5744 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5745 dma_addr_t mapping, int len, u32 flags,
5748 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5749 int is_end = (mss_and_is_end & 0x1);
5750 u32 mss = (mss_and_is_end >> 1);
5754 flags |= TXD_FLAG_END;
5755 if (flags & TXD_FLAG_VLAN) {
5756 vlan_tag = flags >> 16;
5759 vlan_tag |= (mss << TXD_MSS_SHIFT);
5761 txd->addr_hi = ((u64) mapping >> 32);
5762 txd->addr_lo = ((u64) mapping & 0xffffffff);
5763 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5764 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
5767 static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
5768 struct sk_buff *skb, int last)
5771 u32 entry = tnapi->tx_prod;
5772 struct ring_info *txb = &tnapi->tx_buffers[entry];
5774 pci_unmap_single(tnapi->tp->pdev,
5775 dma_unmap_addr(txb, mapping),
5778 for (i = 0; i < last; i++) {
5779 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5781 entry = NEXT_TX(entry);
5782 txb = &tnapi->tx_buffers[entry];
5784 pci_unmap_page(tnapi->tp->pdev,
5785 dma_unmap_addr(txb, mapping),
5786 frag->size, PCI_DMA_TODEVICE);
5790 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5791 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5792 struct sk_buff *skb,
5793 u32 base_flags, u32 mss)
5795 struct tg3 *tp = tnapi->tp;
5796 struct sk_buff *new_skb;
5797 dma_addr_t new_addr = 0;
5798 u32 entry = tnapi->tx_prod;
5801 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5802 new_skb = skb_copy(skb, GFP_ATOMIC);
5804 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5806 new_skb = skb_copy_expand(skb,
5807 skb_headroom(skb) + more_headroom,
5808 skb_tailroom(skb), GFP_ATOMIC);
5814 /* New SKB is guaranteed to be linear. */
5815 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5817 /* Make sure the mapping succeeded */
5818 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5820 dev_kfree_skb(new_skb);
5822 /* Make sure new skb does not cross any 4G boundaries.
5823 * Drop the packet if it does.
5825 } else if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
5826 tg3_4g_overflow_test(new_addr, new_skb->len)) {
5827 pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5830 dev_kfree_skb(new_skb);
5832 tnapi->tx_buffers[entry].skb = new_skb;
5833 dma_unmap_addr_set(&tnapi->tx_buffers[entry],
5836 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5837 base_flags, 1 | (mss << 1));
5846 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
5848 /* Use GSO to workaround a rare TSO bug that may be triggered when the
5849 * TSO header is greater than 80 bytes.
5851 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5853 struct sk_buff *segs, *nskb;
5854 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5856 /* Estimate the number of fragments in the worst case */
5857 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5858 netif_stop_queue(tp->dev);
5860 /* netif_tx_stop_queue() must be done before checking
5861 * checking tx index in tg3_tx_avail() below, because in
5862 * tg3_tx(), we update tx index before checking for
5863 * netif_tx_queue_stopped().
5866 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5867 return NETDEV_TX_BUSY;
5869 netif_wake_queue(tp->dev);
5872 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5874 goto tg3_tso_bug_end;
5880 tg3_start_xmit(nskb, tp->dev);
5886 return NETDEV_TX_OK;
5889 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5890 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
5892 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
5894 struct tg3 *tp = netdev_priv(dev);
5895 u32 len, entry, base_flags, mss;
5896 int i = -1, would_hit_hwbug;
5898 struct tg3_napi *tnapi;
5899 struct netdev_queue *txq;
5902 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5903 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5904 if (tg3_flag(tp, ENABLE_TSS))
5907 /* We are running in BH disabled context with netif_tx_lock
5908 * and TX reclaim runs via tp->napi.poll inside of a software
5909 * interrupt. Furthermore, IRQ processing runs lockless so we have
5910 * no IRQ context deadlocks to worry about either. Rejoice!
5912 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5913 if (!netif_tx_queue_stopped(txq)) {
5914 netif_tx_stop_queue(txq);
5916 /* This is a hard error, log it. */
5918 "BUG! Tx Ring full when queue awake!\n");
5920 return NETDEV_TX_BUSY;
5923 entry = tnapi->tx_prod;
5925 if (skb->ip_summed == CHECKSUM_PARTIAL)
5926 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5928 mss = skb_shinfo(skb)->gso_size;
5931 u32 tcp_opt_len, hdr_len;
5933 if (skb_header_cloned(skb) &&
5934 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5940 tcp_opt_len = tcp_optlen(skb);
5942 if (skb_is_gso_v6(skb)) {
5943 hdr_len = skb_headlen(skb) - ETH_HLEN;
5947 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5948 hdr_len = ip_tcp_len + tcp_opt_len;
5951 iph->tot_len = htons(mss + hdr_len);
5954 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5955 tg3_flag(tp, TSO_BUG))
5956 return tg3_tso_bug(tp, skb);
5958 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5959 TXD_FLAG_CPU_POST_DMA);
5961 if (tg3_flag(tp, HW_TSO_1) ||
5962 tg3_flag(tp, HW_TSO_2) ||
5963 tg3_flag(tp, HW_TSO_3)) {
5964 tcp_hdr(skb)->check = 0;
5965 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5967 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5972 if (tg3_flag(tp, HW_TSO_3)) {
5973 mss |= (hdr_len & 0xc) << 12;
5975 base_flags |= 0x00000010;
5976 base_flags |= (hdr_len & 0x3e0) << 5;
5977 } else if (tg3_flag(tp, HW_TSO_2))
5978 mss |= hdr_len << 9;
5979 else if (tg3_flag(tp, HW_TSO_1) ||
5980 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5981 if (tcp_opt_len || iph->ihl > 5) {
5984 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5985 mss |= (tsflags << 11);
5988 if (tcp_opt_len || iph->ihl > 5) {
5991 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5992 base_flags |= tsflags << 12;
5997 if (vlan_tx_tag_present(skb))
5998 base_flags |= (TXD_FLAG_VLAN |
5999 (vlan_tx_tag_get(skb) << 16));
6001 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6002 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6003 base_flags |= TXD_FLAG_JMB_PKT;
6005 len = skb_headlen(skb);
6007 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6008 if (pci_dma_mapping_error(tp->pdev, mapping)) {
6013 tnapi->tx_buffers[entry].skb = skb;
6014 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6016 would_hit_hwbug = 0;
6018 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6019 would_hit_hwbug = 1;
6021 if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
6022 tg3_4g_overflow_test(mapping, len))
6023 would_hit_hwbug = 1;
6025 if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
6026 tg3_40bit_overflow_test(tp, mapping, len))
6027 would_hit_hwbug = 1;
6029 if (tg3_flag(tp, 5701_DMA_BUG))
6030 would_hit_hwbug = 1;
6032 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
6033 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
6035 entry = NEXT_TX(entry);
6037 /* Now loop through additional data fragments, and queue them. */
6038 if (skb_shinfo(skb)->nr_frags > 0) {
6039 last = skb_shinfo(skb)->nr_frags - 1;
6040 for (i = 0; i <= last; i++) {
6041 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6044 mapping = pci_map_page(tp->pdev,
6047 len, PCI_DMA_TODEVICE);
6049 tnapi->tx_buffers[entry].skb = NULL;
6050 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6052 if (pci_dma_mapping_error(tp->pdev, mapping))
6055 if (tg3_flag(tp, SHORT_DMA_BUG) &&
6057 would_hit_hwbug = 1;
6059 if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
6060 tg3_4g_overflow_test(mapping, len))
6061 would_hit_hwbug = 1;
6063 if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
6064 tg3_40bit_overflow_test(tp, mapping, len))
6065 would_hit_hwbug = 1;
6067 if (tg3_flag(tp, HW_TSO_1) ||
6068 tg3_flag(tp, HW_TSO_2) ||
6069 tg3_flag(tp, HW_TSO_3))
6070 tg3_set_txd(tnapi, entry, mapping, len,
6071 base_flags, (i == last)|(mss << 1));
6073 tg3_set_txd(tnapi, entry, mapping, len,
6074 base_flags, (i == last));
6076 entry = NEXT_TX(entry);
6080 if (would_hit_hwbug) {
6081 tg3_skb_error_unmap(tnapi, skb, i);
6083 /* If the workaround fails due to memory/mapping
6084 * failure, silently drop this packet.
6086 if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags, mss))
6089 entry = NEXT_TX(tnapi->tx_prod);
6092 /* Packets are ready, update Tx producer idx local and on card. */
6093 tw32_tx_mbox(tnapi->prodmbox, entry);
6095 skb_tx_timestamp(skb);
6097 tnapi->tx_prod = entry;
6098 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6099 netif_tx_stop_queue(txq);
6101 /* netif_tx_stop_queue() must be done before checking
6102 * checking tx index in tg3_tx_avail() below, because in
6103 * tg3_tx(), we update tx index before checking for
6104 * netif_tx_queue_stopped().
6107 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6108 netif_tx_wake_queue(txq);
6114 return NETDEV_TX_OK;
6117 tg3_skb_error_unmap(tnapi, skb, i);
6119 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6120 return NETDEV_TX_OK;
6123 static void tg3_set_loopback(struct net_device *dev, u32 features)
6125 struct tg3 *tp = netdev_priv(dev);
6127 if (features & NETIF_F_LOOPBACK) {
6128 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6132 * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
6133 * loopback mode if Half-Duplex mode was negotiated earlier.
6135 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6137 /* Enable internal MAC loopback mode */
6138 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6139 spin_lock_bh(&tp->lock);
6140 tw32(MAC_MODE, tp->mac_mode);
6141 netif_carrier_on(tp->dev);
6142 spin_unlock_bh(&tp->lock);
6143 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6145 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6148 /* Disable internal MAC loopback mode */
6149 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6150 spin_lock_bh(&tp->lock);
6151 tw32(MAC_MODE, tp->mac_mode);
6152 /* Force link status check */
6153 tg3_setup_phy(tp, 1);
6154 spin_unlock_bh(&tp->lock);
6155 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6159 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6161 struct tg3 *tp = netdev_priv(dev);
6163 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6164 features &= ~NETIF_F_ALL_TSO;
6169 static int tg3_set_features(struct net_device *dev, u32 features)
6171 u32 changed = dev->features ^ features;
6173 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6174 tg3_set_loopback(dev, features);
6179 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6184 if (new_mtu > ETH_DATA_LEN) {
6185 if (tg3_flag(tp, 5780_CLASS)) {
6186 netdev_update_features(dev);
6187 tg3_flag_clear(tp, TSO_CAPABLE);
6189 tg3_flag_set(tp, JUMBO_RING_ENABLE);
6192 if (tg3_flag(tp, 5780_CLASS)) {
6193 tg3_flag_set(tp, TSO_CAPABLE);
6194 netdev_update_features(dev);
6196 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
6200 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6202 struct tg3 *tp = netdev_priv(dev);
6205 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6208 if (!netif_running(dev)) {
6209 /* We'll just catch it later when the
6212 tg3_set_mtu(dev, tp, new_mtu);
6220 tg3_full_lock(tp, 1);
6222 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6224 tg3_set_mtu(dev, tp, new_mtu);
6226 err = tg3_restart_hw(tp, 0);
6229 tg3_netif_start(tp);
6231 tg3_full_unlock(tp);
6239 static void tg3_rx_prodring_free(struct tg3 *tp,
6240 struct tg3_rx_prodring_set *tpr)
6244 if (tpr != &tp->napi[0].prodring) {
6245 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6246 i = (i + 1) & tp->rx_std_ring_mask)
6247 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6250 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6251 for (i = tpr->rx_jmb_cons_idx;
6252 i != tpr->rx_jmb_prod_idx;
6253 i = (i + 1) & tp->rx_jmb_ring_mask) {
6254 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6262 for (i = 0; i <= tp->rx_std_ring_mask; i++)
6263 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6266 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6267 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6268 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6273 /* Initialize rx rings for packet processing.
6275 * The chip has been shut down and the driver detached from
6276 * the networking, so no interrupts or new tx packets will
6277 * end up in the driver. tp->{tx,}lock are held and thus
6280 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6281 struct tg3_rx_prodring_set *tpr)
6283 u32 i, rx_pkt_dma_sz;
6285 tpr->rx_std_cons_idx = 0;
6286 tpr->rx_std_prod_idx = 0;
6287 tpr->rx_jmb_cons_idx = 0;
6288 tpr->rx_jmb_prod_idx = 0;
6290 if (tpr != &tp->napi[0].prodring) {
6291 memset(&tpr->rx_std_buffers[0], 0,
6292 TG3_RX_STD_BUFF_RING_SIZE(tp));
6293 if (tpr->rx_jmb_buffers)
6294 memset(&tpr->rx_jmb_buffers[0], 0,
6295 TG3_RX_JMB_BUFF_RING_SIZE(tp));
6299 /* Zero out all descriptors. */
6300 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6302 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6303 if (tg3_flag(tp, 5780_CLASS) &&
6304 tp->dev->mtu > ETH_DATA_LEN)
6305 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6306 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6308 /* Initialize invariants of the rings, we only set this
6309 * stuff once. This works because the card does not
6310 * write into the rx buffer posting rings.
6312 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6313 struct tg3_rx_buffer_desc *rxd;
6315 rxd = &tpr->rx_std[i];
6316 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6317 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6318 rxd->opaque = (RXD_OPAQUE_RING_STD |
6319 (i << RXD_OPAQUE_INDEX_SHIFT));
6322 /* Now allocate fresh SKBs for each rx ring. */
6323 for (i = 0; i < tp->rx_pending; i++) {
6324 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6325 netdev_warn(tp->dev,
6326 "Using a smaller RX standard ring. Only "
6327 "%d out of %d buffers were allocated "
6328 "successfully\n", i, tp->rx_pending);
6336 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6339 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6341 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
6344 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6345 struct tg3_rx_buffer_desc *rxd;
6347 rxd = &tpr->rx_jmb[i].std;
6348 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6349 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6351 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6352 (i << RXD_OPAQUE_INDEX_SHIFT));
6355 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6356 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6357 netdev_warn(tp->dev,
6358 "Using a smaller RX jumbo ring. Only %d "
6359 "out of %d buffers were allocated "
6360 "successfully\n", i, tp->rx_jumbo_pending);
6363 tp->rx_jumbo_pending = i;
6372 tg3_rx_prodring_free(tp, tpr);
6376 static void tg3_rx_prodring_fini(struct tg3 *tp,
6377 struct tg3_rx_prodring_set *tpr)
6379 kfree(tpr->rx_std_buffers);
6380 tpr->rx_std_buffers = NULL;
6381 kfree(tpr->rx_jmb_buffers);
6382 tpr->rx_jmb_buffers = NULL;
6384 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6385 tpr->rx_std, tpr->rx_std_mapping);
6389 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6390 tpr->rx_jmb, tpr->rx_jmb_mapping);
6395 static int tg3_rx_prodring_init(struct tg3 *tp,
6396 struct tg3_rx_prodring_set *tpr)
6398 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6400 if (!tpr->rx_std_buffers)
6403 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6404 TG3_RX_STD_RING_BYTES(tp),
6405 &tpr->rx_std_mapping,
6410 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6411 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6413 if (!tpr->rx_jmb_buffers)
6416 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6417 TG3_RX_JMB_RING_BYTES(tp),
6418 &tpr->rx_jmb_mapping,
6427 tg3_rx_prodring_fini(tp, tpr);
6431 /* Free up pending packets in all rx/tx rings.
6433 * The chip has been shut down and the driver detached from
6434 * the networking, so no interrupts or new tx packets will
6435 * end up in the driver. tp->{tx,}lock is not held and we are not
6436 * in an interrupt context and thus may sleep.
6438 static void tg3_free_rings(struct tg3 *tp)
6442 for (j = 0; j < tp->irq_cnt; j++) {
6443 struct tg3_napi *tnapi = &tp->napi[j];
6445 tg3_rx_prodring_free(tp, &tnapi->prodring);
6447 if (!tnapi->tx_buffers)
6450 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6451 struct ring_info *txp;
6452 struct sk_buff *skb;
6455 txp = &tnapi->tx_buffers[i];
6463 pci_unmap_single(tp->pdev,
6464 dma_unmap_addr(txp, mapping),
6471 for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6472 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6473 pci_unmap_page(tp->pdev,
6474 dma_unmap_addr(txp, mapping),
6475 skb_shinfo(skb)->frags[k].size,
6480 dev_kfree_skb_any(skb);
6485 /* Initialize tx/rx rings for packet processing.
6487 * The chip has been shut down and the driver detached from
6488 * the networking, so no interrupts or new tx packets will
6489 * end up in the driver. tp->{tx,}lock are held and thus
6492 static int tg3_init_rings(struct tg3 *tp)
6496 /* Free up all the SKBs. */
6499 for (i = 0; i < tp->irq_cnt; i++) {
6500 struct tg3_napi *tnapi = &tp->napi[i];
6502 tnapi->last_tag = 0;
6503 tnapi->last_irq_tag = 0;
6504 tnapi->hw_status->status = 0;
6505 tnapi->hw_status->status_tag = 0;
6506 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6511 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6513 tnapi->rx_rcb_ptr = 0;
6515 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6517 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6527 * Must not be invoked with interrupt sources disabled and
6528 * the hardware shutdown down.
6530 static void tg3_free_consistent(struct tg3 *tp)
6534 for (i = 0; i < tp->irq_cnt; i++) {
6535 struct tg3_napi *tnapi = &tp->napi[i];
6537 if (tnapi->tx_ring) {
6538 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6539 tnapi->tx_ring, tnapi->tx_desc_mapping);
6540 tnapi->tx_ring = NULL;
6543 kfree(tnapi->tx_buffers);
6544 tnapi->tx_buffers = NULL;
6546 if (tnapi->rx_rcb) {
6547 dma_free_coherent(&tp->pdev->dev,
6548 TG3_RX_RCB_RING_BYTES(tp),
6550 tnapi->rx_rcb_mapping);
6551 tnapi->rx_rcb = NULL;
6554 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6556 if (tnapi->hw_status) {
6557 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6559 tnapi->status_mapping);
6560 tnapi->hw_status = NULL;
6565 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6566 tp->hw_stats, tp->stats_mapping);
6567 tp->hw_stats = NULL;
6572 * Must not be invoked with interrupt sources disabled and
6573 * the hardware shutdown down. Can sleep.
6575 static int tg3_alloc_consistent(struct tg3 *tp)
6579 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6580 sizeof(struct tg3_hw_stats),
6586 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6588 for (i = 0; i < tp->irq_cnt; i++) {
6589 struct tg3_napi *tnapi = &tp->napi[i];
6590 struct tg3_hw_status *sblk;
6592 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6594 &tnapi->status_mapping,
6596 if (!tnapi->hw_status)
6599 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6600 sblk = tnapi->hw_status;
6602 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6605 /* If multivector TSS is enabled, vector 0 does not handle
6606 * tx interrupts. Don't allocate any resources for it.
6608 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6609 (i && tg3_flag(tp, ENABLE_TSS))) {
6610 tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6613 if (!tnapi->tx_buffers)
6616 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6618 &tnapi->tx_desc_mapping,
6620 if (!tnapi->tx_ring)
6625 * When RSS is enabled, the status block format changes
6626 * slightly. The "rx_jumbo_consumer", "reserved",
6627 * and "rx_mini_consumer" members get mapped to the
6628 * other three rx return ring producer indexes.
6632 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6635 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6638 tnapi->rx_rcb_prod_idx = &sblk->reserved;
6641 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6646 * If multivector RSS is enabled, vector 0 does not handle
6647 * rx or tx interrupts. Don't allocate any resources for it.
6649 if (!i && tg3_flag(tp, ENABLE_RSS))
6652 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6653 TG3_RX_RCB_RING_BYTES(tp),
6654 &tnapi->rx_rcb_mapping,
6659 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6665 tg3_free_consistent(tp);
6669 #define MAX_WAIT_CNT 1000
6671 /* To stop a block, clear the enable bit and poll till it
6672 * clears. tp->lock is held.
6674 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6679 if (tg3_flag(tp, 5705_PLUS)) {
6686 /* We can't enable/disable these bits of the
6687 * 5705/5750, just say success.
6700 for (i = 0; i < MAX_WAIT_CNT; i++) {
6703 if ((val & enable_bit) == 0)
6707 if (i == MAX_WAIT_CNT && !silent) {
6708 dev_err(&tp->pdev->dev,
6709 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6717 /* tp->lock is held. */
6718 static int tg3_abort_hw(struct tg3 *tp, int silent)
6722 tg3_disable_ints(tp);
6724 tp->rx_mode &= ~RX_MODE_ENABLE;
6725 tw32_f(MAC_RX_MODE, tp->rx_mode);
6728 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6729 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6730 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6731 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6732 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6733 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6735 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6736 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6737 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6738 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6739 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6740 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6741 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6743 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6744 tw32_f(MAC_MODE, tp->mac_mode);
6747 tp->tx_mode &= ~TX_MODE_ENABLE;
6748 tw32_f(MAC_TX_MODE, tp->tx_mode);
6750 for (i = 0; i < MAX_WAIT_CNT; i++) {
6752 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6755 if (i >= MAX_WAIT_CNT) {
6756 dev_err(&tp->pdev->dev,
6757 "%s timed out, TX_MODE_ENABLE will not clear "
6758 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6762 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6763 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6764 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6766 tw32(FTQ_RESET, 0xffffffff);
6767 tw32(FTQ_RESET, 0x00000000);
6769 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6770 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6772 for (i = 0; i < tp->irq_cnt; i++) {
6773 struct tg3_napi *tnapi = &tp->napi[i];
6774 if (tnapi->hw_status)
6775 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6778 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6783 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6788 /* NCSI does not support APE events */
6789 if (tg3_flag(tp, APE_HAS_NCSI))
6792 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6793 if (apedata != APE_SEG_SIG_MAGIC)
6796 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6797 if (!(apedata & APE_FW_STATUS_READY))
6800 /* Wait for up to 1 millisecond for APE to service previous event. */
6801 for (i = 0; i < 10; i++) {
6802 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6805 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6807 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6808 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6809 event | APE_EVENT_STATUS_EVENT_PENDING);
6811 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6813 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6819 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6820 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6823 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6828 if (!tg3_flag(tp, ENABLE_APE))
6832 case RESET_KIND_INIT:
6833 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6834 APE_HOST_SEG_SIG_MAGIC);
6835 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6836 APE_HOST_SEG_LEN_MAGIC);
6837 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6838 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6839 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6840 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6841 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6842 APE_HOST_BEHAV_NO_PHYLOCK);
6843 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6844 TG3_APE_HOST_DRVR_STATE_START);
6846 event = APE_EVENT_STATUS_STATE_START;
6848 case RESET_KIND_SHUTDOWN:
6849 /* With the interface we are currently using,
6850 * APE does not track driver state. Wiping
6851 * out the HOST SEGMENT SIGNATURE forces
6852 * the APE to assume OS absent status.
6854 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6856 if (device_may_wakeup(&tp->pdev->dev) &&
6857 tg3_flag(tp, WOL_ENABLE)) {
6858 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
6859 TG3_APE_HOST_WOL_SPEED_AUTO);
6860 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
6862 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
6864 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
6866 event = APE_EVENT_STATUS_STATE_UNLOAD;
6868 case RESET_KIND_SUSPEND:
6869 event = APE_EVENT_STATUS_STATE_SUSPEND;
6875 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6877 tg3_ape_send_event(tp, event);
6880 /* tp->lock is held. */
6881 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6883 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6884 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6886 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6888 case RESET_KIND_INIT:
6889 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6893 case RESET_KIND_SHUTDOWN:
6894 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6898 case RESET_KIND_SUSPEND:
6899 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6908 if (kind == RESET_KIND_INIT ||
6909 kind == RESET_KIND_SUSPEND)
6910 tg3_ape_driver_state_change(tp, kind);
6913 /* tp->lock is held. */
6914 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6916 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6918 case RESET_KIND_INIT:
6919 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6920 DRV_STATE_START_DONE);
6923 case RESET_KIND_SHUTDOWN:
6924 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6925 DRV_STATE_UNLOAD_DONE);
6933 if (kind == RESET_KIND_SHUTDOWN)
6934 tg3_ape_driver_state_change(tp, kind);
6937 /* tp->lock is held. */
6938 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6940 if (tg3_flag(tp, ENABLE_ASF)) {
6942 case RESET_KIND_INIT:
6943 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6947 case RESET_KIND_SHUTDOWN:
6948 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6952 case RESET_KIND_SUSPEND:
6953 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6963 static int tg3_poll_fw(struct tg3 *tp)
6968 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6969 /* Wait up to 20ms for init done. */
6970 for (i = 0; i < 200; i++) {
6971 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6978 /* Wait for firmware initialization to complete. */
6979 for (i = 0; i < 100000; i++) {
6980 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
6981 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6986 /* Chip might not be fitted with firmware. Some Sun onboard
6987 * parts are configured like that. So don't signal the timeout
6988 * of the above loop as an error, but do report the lack of
6989 * running firmware once.
6991 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
6992 tg3_flag_set(tp, NO_FWARE_REPORTED);
6994 netdev_info(tp->dev, "No firmware running\n");
6997 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
6998 /* The 57765 A0 needs a little more
6999 * time to do some important work.
7007 /* Save PCI command register before chip reset */
7008 static void tg3_save_pci_state(struct tg3 *tp)
7010 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7013 /* Restore PCI state after chip reset */
7014 static void tg3_restore_pci_state(struct tg3 *tp)
7018 /* Re-enable indirect register accesses. */
7019 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7020 tp->misc_host_ctrl);
7022 /* Set MAX PCI retry to zero. */
7023 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7024 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7025 tg3_flag(tp, PCIX_MODE))
7026 val |= PCISTATE_RETRY_SAME_DMA;
7027 /* Allow reads and writes to the APE register and memory space. */
7028 if (tg3_flag(tp, ENABLE_APE))
7029 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7030 PCISTATE_ALLOW_APE_SHMEM_WR |
7031 PCISTATE_ALLOW_APE_PSPACE_WR;
7032 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7034 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7036 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7037 if (tg3_flag(tp, PCI_EXPRESS))
7038 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7040 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7041 tp->pci_cacheline_sz);
7042 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7047 /* Make sure PCI-X relaxed ordering bit is clear. */
7048 if (tg3_flag(tp, PCIX_MODE)) {
7051 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7053 pcix_cmd &= ~PCI_X_CMD_ERO;
7054 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7058 if (tg3_flag(tp, 5780_CLASS)) {
7060 /* Chip reset on 5780 will reset MSI enable bit,
7061 * so need to restore it.
7063 if (tg3_flag(tp, USING_MSI)) {
7066 pci_read_config_word(tp->pdev,
7067 tp->msi_cap + PCI_MSI_FLAGS,
7069 pci_write_config_word(tp->pdev,
7070 tp->msi_cap + PCI_MSI_FLAGS,
7071 ctrl | PCI_MSI_FLAGS_ENABLE);
7072 val = tr32(MSGINT_MODE);
7073 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7078 static void tg3_stop_fw(struct tg3 *);
7080 /* tp->lock is held. */
7081 static int tg3_chip_reset(struct tg3 *tp)
7084 void (*write_op)(struct tg3 *, u32, u32);
7089 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7091 /* No matching tg3_nvram_unlock() after this because
7092 * chip reset below will undo the nvram lock.
7094 tp->nvram_lock_cnt = 0;
7096 /* GRC_MISC_CFG core clock reset will clear the memory
7097 * enable bit in PCI register 4 and the MSI enable bit
7098 * on some chips, so we save relevant registers here.
7100 tg3_save_pci_state(tp);
7102 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7103 tg3_flag(tp, 5755_PLUS))
7104 tw32(GRC_FASTBOOT_PC, 0);
7107 * We must avoid the readl() that normally takes place.
7108 * It locks machines, causes machine checks, and other
7109 * fun things. So, temporarily disable the 5701
7110 * hardware workaround, while we do the reset.
7112 write_op = tp->write32;
7113 if (write_op == tg3_write_flush_reg32)
7114 tp->write32 = tg3_write32;
7116 /* Prevent the irq handler from reading or writing PCI registers
7117 * during chip reset when the memory enable bit in the PCI command
7118 * register may be cleared. The chip does not generate interrupt
7119 * at this time, but the irq handler may still be called due to irq
7120 * sharing or irqpoll.
7122 tg3_flag_set(tp, CHIP_RESETTING);
7123 for (i = 0; i < tp->irq_cnt; i++) {
7124 struct tg3_napi *tnapi = &tp->napi[i];
7125 if (tnapi->hw_status) {
7126 tnapi->hw_status->status = 0;
7127 tnapi->hw_status->status_tag = 0;
7129 tnapi->last_tag = 0;
7130 tnapi->last_irq_tag = 0;
7134 for (i = 0; i < tp->irq_cnt; i++)
7135 synchronize_irq(tp->napi[i].irq_vec);
7137 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7138 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7139 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7143 val = GRC_MISC_CFG_CORECLK_RESET;
7145 if (tg3_flag(tp, PCI_EXPRESS)) {
7146 /* Force PCIe 1.0a mode */
7147 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7148 !tg3_flag(tp, 57765_PLUS) &&
7149 tr32(TG3_PCIE_PHY_TSTCTL) ==
7150 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7151 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7153 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7154 tw32(GRC_MISC_CFG, (1 << 29));
7159 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7160 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7161 tw32(GRC_VCPU_EXT_CTRL,
7162 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7165 /* Manage gphy power for all CPMU absent PCIe devices. */
7166 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7167 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7169 tw32(GRC_MISC_CFG, val);
7171 /* restore 5701 hardware bug workaround write method */
7172 tp->write32 = write_op;
7174 /* Unfortunately, we have to delay before the PCI read back.
7175 * Some 575X chips even will not respond to a PCI cfg access
7176 * when the reset command is given to the chip.
7178 * How do these hardware designers expect things to work
7179 * properly if the PCI write is posted for a long period
7180 * of time? It is always necessary to have some method by
7181 * which a register read back can occur to push the write
7182 * out which does the reset.
7184 * For most tg3 variants the trick below was working.
7189 /* Flush PCI posted writes. The normal MMIO registers
7190 * are inaccessible at this time so this is the only
7191 * way to make this reliably (actually, this is no longer
7192 * the case, see above). I tried to use indirect
7193 * register read/write but this upset some 5701 variants.
7195 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7199 if (tg3_flag(tp, PCI_EXPRESS) && tp->pcie_cap) {
7202 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7206 /* Wait for link training to complete. */
7207 for (i = 0; i < 5000; i++)
7210 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7211 pci_write_config_dword(tp->pdev, 0xc4,
7212 cfg_val | (1 << 15));
7215 /* Clear the "no snoop" and "relaxed ordering" bits. */
7216 pci_read_config_word(tp->pdev,
7217 tp->pcie_cap + PCI_EXP_DEVCTL,
7219 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7220 PCI_EXP_DEVCTL_NOSNOOP_EN);
7222 * Older PCIe devices only support the 128 byte
7223 * MPS setting. Enforce the restriction.
7225 if (!tg3_flag(tp, CPMU_PRESENT))
7226 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7227 pci_write_config_word(tp->pdev,
7228 tp->pcie_cap + PCI_EXP_DEVCTL,
7231 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7233 /* Clear error status */
7234 pci_write_config_word(tp->pdev,
7235 tp->pcie_cap + PCI_EXP_DEVSTA,
7236 PCI_EXP_DEVSTA_CED |
7237 PCI_EXP_DEVSTA_NFED |
7238 PCI_EXP_DEVSTA_FED |
7239 PCI_EXP_DEVSTA_URD);
7242 tg3_restore_pci_state(tp);
7244 tg3_flag_clear(tp, CHIP_RESETTING);
7245 tg3_flag_clear(tp, ERROR_PROCESSED);
7248 if (tg3_flag(tp, 5780_CLASS))
7249 val = tr32(MEMARB_MODE);
7250 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7252 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7254 tw32(0x5000, 0x400);
7257 tw32(GRC_MODE, tp->grc_mode);
7259 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7262 tw32(0xc4, val | (1 << 15));
7265 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7266 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7267 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7268 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7269 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7270 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7273 if (tg3_flag(tp, ENABLE_APE))
7274 tp->mac_mode = MAC_MODE_APE_TX_EN |
7275 MAC_MODE_APE_RX_EN |
7276 MAC_MODE_TDE_ENABLE;
7278 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7279 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
7281 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7282 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7287 tw32_f(MAC_MODE, val);
7290 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7292 err = tg3_poll_fw(tp);
7298 if (tg3_flag(tp, PCI_EXPRESS) &&
7299 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7300 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7301 !tg3_flag(tp, 57765_PLUS)) {
7304 tw32(0x7c00, val | (1 << 25));
7307 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7308 val = tr32(TG3_CPMU_CLCK_ORIDE);
7309 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7312 /* Reprobe ASF enable state. */
7313 tg3_flag_clear(tp, ENABLE_ASF);
7314 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7315 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7316 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7319 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7320 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7321 tg3_flag_set(tp, ENABLE_ASF);
7322 tp->last_event_jiffies = jiffies;
7323 if (tg3_flag(tp, 5750_PLUS))
7324 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7331 /* tp->lock is held. */
7332 static void tg3_stop_fw(struct tg3 *tp)
7334 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7335 /* Wait for RX cpu to ACK the previous event. */
7336 tg3_wait_for_event_ack(tp);
7338 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7340 tg3_generate_fw_event(tp);
7342 /* Wait for RX cpu to ACK this event. */
7343 tg3_wait_for_event_ack(tp);
7347 /* tp->lock is held. */
7348 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7354 tg3_write_sig_pre_reset(tp, kind);
7356 tg3_abort_hw(tp, silent);
7357 err = tg3_chip_reset(tp);
7359 __tg3_set_mac_addr(tp, 0);
7361 tg3_write_sig_legacy(tp, kind);
7362 tg3_write_sig_post_reset(tp, kind);
7370 #define RX_CPU_SCRATCH_BASE 0x30000
7371 #define RX_CPU_SCRATCH_SIZE 0x04000
7372 #define TX_CPU_SCRATCH_BASE 0x34000
7373 #define TX_CPU_SCRATCH_SIZE 0x04000
7375 /* tp->lock is held. */
7376 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7380 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7382 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7383 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7385 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7388 if (offset == RX_CPU_BASE) {
7389 for (i = 0; i < 10000; i++) {
7390 tw32(offset + CPU_STATE, 0xffffffff);
7391 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7392 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7396 tw32(offset + CPU_STATE, 0xffffffff);
7397 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
7400 for (i = 0; i < 10000; i++) {
7401 tw32(offset + CPU_STATE, 0xffffffff);
7402 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7403 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7409 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7410 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7414 /* Clear firmware's nvram arbitration. */
7415 if (tg3_flag(tp, NVRAM))
7416 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7421 unsigned int fw_base;
7422 unsigned int fw_len;
7423 const __be32 *fw_data;
7426 /* tp->lock is held. */
7427 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7428 int cpu_scratch_size, struct fw_info *info)
7430 int err, lock_err, i;
7431 void (*write_op)(struct tg3 *, u32, u32);
7433 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
7435 "%s: Trying to load TX cpu firmware which is 5705\n",
7440 if (tg3_flag(tp, 5705_PLUS))
7441 write_op = tg3_write_mem;
7443 write_op = tg3_write_indirect_reg32;
7445 /* It is possible that bootcode is still loading at this point.
7446 * Get the nvram lock first before halting the cpu.
7448 lock_err = tg3_nvram_lock(tp);
7449 err = tg3_halt_cpu(tp, cpu_base);
7451 tg3_nvram_unlock(tp);
7455 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7456 write_op(tp, cpu_scratch_base + i, 0);
7457 tw32(cpu_base + CPU_STATE, 0xffffffff);
7458 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7459 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7460 write_op(tp, (cpu_scratch_base +
7461 (info->fw_base & 0xffff) +
7463 be32_to_cpu(info->fw_data[i]));
7471 /* tp->lock is held. */
7472 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7474 struct fw_info info;
7475 const __be32 *fw_data;
7478 fw_data = (void *)tp->fw->data;
7480 /* Firmware blob starts with version numbers, followed by
7481 start address and length. We are setting complete length.
7482 length = end_address_of_bss - start_address_of_text.
7483 Remainder is the blob to be loaded contiguously
7484 from start address. */
7486 info.fw_base = be32_to_cpu(fw_data[1]);
7487 info.fw_len = tp->fw->size - 12;
7488 info.fw_data = &fw_data[3];
7490 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7491 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7496 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7497 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7502 /* Now startup only the RX cpu. */
7503 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7504 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7506 for (i = 0; i < 5; i++) {
7507 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7509 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7510 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
7511 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7515 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7516 "should be %08x\n", __func__,
7517 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7520 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7521 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
7526 /* tp->lock is held. */
7527 static int tg3_load_tso_firmware(struct tg3 *tp)
7529 struct fw_info info;
7530 const __be32 *fw_data;
7531 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7534 if (tg3_flag(tp, HW_TSO_1) ||
7535 tg3_flag(tp, HW_TSO_2) ||
7536 tg3_flag(tp, HW_TSO_3))
7539 fw_data = (void *)tp->fw->data;
7541 /* Firmware blob starts with version numbers, followed by
7542 start address and length. We are setting complete length.
7543 length = end_address_of_bss - start_address_of_text.
7544 Remainder is the blob to be loaded contiguously
7545 from start address. */
7547 info.fw_base = be32_to_cpu(fw_data[1]);
7548 cpu_scratch_size = tp->fw_len;
7549 info.fw_len = tp->fw->size - 12;
7550 info.fw_data = &fw_data[3];
7552 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7553 cpu_base = RX_CPU_BASE;
7554 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7556 cpu_base = TX_CPU_BASE;
7557 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7558 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7561 err = tg3_load_firmware_cpu(tp, cpu_base,
7562 cpu_scratch_base, cpu_scratch_size,
7567 /* Now startup the cpu. */
7568 tw32(cpu_base + CPU_STATE, 0xffffffff);
7569 tw32_f(cpu_base + CPU_PC, info.fw_base);
7571 for (i = 0; i < 5; i++) {
7572 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7574 tw32(cpu_base + CPU_STATE, 0xffffffff);
7575 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7576 tw32_f(cpu_base + CPU_PC, info.fw_base);
7581 "%s fails to set CPU PC, is %08x should be %08x\n",
7582 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7585 tw32(cpu_base + CPU_STATE, 0xffffffff);
7586 tw32_f(cpu_base + CPU_MODE, 0x00000000);
7591 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7593 struct tg3 *tp = netdev_priv(dev);
7594 struct sockaddr *addr = p;
7595 int err = 0, skip_mac_1 = 0;
7597 if (!is_valid_ether_addr(addr->sa_data))
7600 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7602 if (!netif_running(dev))
7605 if (tg3_flag(tp, ENABLE_ASF)) {
7606 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7608 addr0_high = tr32(MAC_ADDR_0_HIGH);
7609 addr0_low = tr32(MAC_ADDR_0_LOW);
7610 addr1_high = tr32(MAC_ADDR_1_HIGH);
7611 addr1_low = tr32(MAC_ADDR_1_LOW);
7613 /* Skip MAC addr 1 if ASF is using it. */
7614 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7615 !(addr1_high == 0 && addr1_low == 0))
7618 spin_lock_bh(&tp->lock);
7619 __tg3_set_mac_addr(tp, skip_mac_1);
7620 spin_unlock_bh(&tp->lock);
7625 /* tp->lock is held. */
7626 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7627 dma_addr_t mapping, u32 maxlen_flags,
7631 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7632 ((u64) mapping >> 32));
7634 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7635 ((u64) mapping & 0xffffffff));
7637 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7640 if (!tg3_flag(tp, 5705_PLUS))
7642 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7646 static void __tg3_set_rx_mode(struct net_device *);
7647 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7651 if (!tg3_flag(tp, ENABLE_TSS)) {
7652 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7653 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7654 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7656 tw32(HOSTCC_TXCOL_TICKS, 0);
7657 tw32(HOSTCC_TXMAX_FRAMES, 0);
7658 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7661 if (!tg3_flag(tp, ENABLE_RSS)) {
7662 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7663 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7664 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7666 tw32(HOSTCC_RXCOL_TICKS, 0);
7667 tw32(HOSTCC_RXMAX_FRAMES, 0);
7668 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7671 if (!tg3_flag(tp, 5705_PLUS)) {
7672 u32 val = ec->stats_block_coalesce_usecs;
7674 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7675 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7677 if (!netif_carrier_ok(tp->dev))
7680 tw32(HOSTCC_STAT_COAL_TICKS, val);
7683 for (i = 0; i < tp->irq_cnt - 1; i++) {
7686 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7687 tw32(reg, ec->rx_coalesce_usecs);
7688 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7689 tw32(reg, ec->rx_max_coalesced_frames);
7690 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7691 tw32(reg, ec->rx_max_coalesced_frames_irq);
7693 if (tg3_flag(tp, ENABLE_TSS)) {
7694 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7695 tw32(reg, ec->tx_coalesce_usecs);
7696 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7697 tw32(reg, ec->tx_max_coalesced_frames);
7698 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7699 tw32(reg, ec->tx_max_coalesced_frames_irq);
7703 for (; i < tp->irq_max - 1; i++) {
7704 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7705 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7706 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7708 if (tg3_flag(tp, ENABLE_TSS)) {
7709 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7710 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7711 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7716 /* tp->lock is held. */
7717 static void tg3_rings_reset(struct tg3 *tp)
7720 u32 stblk, txrcb, rxrcb, limit;
7721 struct tg3_napi *tnapi = &tp->napi[0];
7723 /* Disable all transmit rings but the first. */
7724 if (!tg3_flag(tp, 5705_PLUS))
7725 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7726 else if (tg3_flag(tp, 5717_PLUS))
7727 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7728 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7729 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7731 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7733 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7734 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7735 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7736 BDINFO_FLAGS_DISABLED);
7739 /* Disable all receive return rings but the first. */
7740 if (tg3_flag(tp, 5717_PLUS))
7741 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7742 else if (!tg3_flag(tp, 5705_PLUS))
7743 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7744 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7745 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7746 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7748 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7750 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7751 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7752 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7753 BDINFO_FLAGS_DISABLED);
7755 /* Disable interrupts */
7756 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7757 tp->napi[0].chk_msi_cnt = 0;
7758 tp->napi[0].last_rx_cons = 0;
7759 tp->napi[0].last_tx_cons = 0;
7761 /* Zero mailbox registers. */
7762 if (tg3_flag(tp, SUPPORT_MSIX)) {
7763 for (i = 1; i < tp->irq_max; i++) {
7764 tp->napi[i].tx_prod = 0;
7765 tp->napi[i].tx_cons = 0;
7766 if (tg3_flag(tp, ENABLE_TSS))
7767 tw32_mailbox(tp->napi[i].prodmbox, 0);
7768 tw32_rx_mbox(tp->napi[i].consmbox, 0);
7769 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7770 tp->napi[0].chk_msi_cnt = 0;
7771 tp->napi[i].last_rx_cons = 0;
7772 tp->napi[i].last_tx_cons = 0;
7774 if (!tg3_flag(tp, ENABLE_TSS))
7775 tw32_mailbox(tp->napi[0].prodmbox, 0);
7777 tp->napi[0].tx_prod = 0;
7778 tp->napi[0].tx_cons = 0;
7779 tw32_mailbox(tp->napi[0].prodmbox, 0);
7780 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7783 /* Make sure the NIC-based send BD rings are disabled. */
7784 if (!tg3_flag(tp, 5705_PLUS)) {
7785 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7786 for (i = 0; i < 16; i++)
7787 tw32_tx_mbox(mbox + i * 8, 0);
7790 txrcb = NIC_SRAM_SEND_RCB;
7791 rxrcb = NIC_SRAM_RCV_RET_RCB;
7793 /* Clear status block in ram. */
7794 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7796 /* Set status block DMA address */
7797 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7798 ((u64) tnapi->status_mapping >> 32));
7799 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7800 ((u64) tnapi->status_mapping & 0xffffffff));
7802 if (tnapi->tx_ring) {
7803 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7804 (TG3_TX_RING_SIZE <<
7805 BDINFO_FLAGS_MAXLEN_SHIFT),
7806 NIC_SRAM_TX_BUFFER_DESC);
7807 txrcb += TG3_BDINFO_SIZE;
7810 if (tnapi->rx_rcb) {
7811 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7812 (tp->rx_ret_ring_mask + 1) <<
7813 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7814 rxrcb += TG3_BDINFO_SIZE;
7817 stblk = HOSTCC_STATBLCK_RING1;
7819 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7820 u64 mapping = (u64)tnapi->status_mapping;
7821 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7822 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7824 /* Clear status block in ram. */
7825 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7827 if (tnapi->tx_ring) {
7828 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7829 (TG3_TX_RING_SIZE <<
7830 BDINFO_FLAGS_MAXLEN_SHIFT),
7831 NIC_SRAM_TX_BUFFER_DESC);
7832 txrcb += TG3_BDINFO_SIZE;
7835 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7836 ((tp->rx_ret_ring_mask + 1) <<
7837 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7840 rxrcb += TG3_BDINFO_SIZE;
7844 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
7846 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
7848 if (!tg3_flag(tp, 5750_PLUS) ||
7849 tg3_flag(tp, 5780_CLASS) ||
7850 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7851 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7852 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
7853 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7854 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7855 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
7857 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
7859 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
7860 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
7862 val = min(nic_rep_thresh, host_rep_thresh);
7863 tw32(RCVBDI_STD_THRESH, val);
7865 if (tg3_flag(tp, 57765_PLUS))
7866 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
7868 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7871 if (!tg3_flag(tp, 5705_PLUS))
7872 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
7874 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
7876 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
7878 val = min(bdcache_maxcnt / 2, host_rep_thresh);
7879 tw32(RCVBDI_JUMBO_THRESH, val);
7881 if (tg3_flag(tp, 57765_PLUS))
7882 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
7885 /* tp->lock is held. */
7886 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7888 u32 val, rdmac_mode;
7890 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
7892 tg3_disable_ints(tp);
7896 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7898 if (tg3_flag(tp, INIT_COMPLETE))
7899 tg3_abort_hw(tp, 1);
7901 /* Enable MAC control of LPI */
7902 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
7903 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
7904 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
7905 TG3_CPMU_EEE_LNKIDL_UART_IDL);
7907 tw32_f(TG3_CPMU_EEE_CTRL,
7908 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
7910 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
7911 TG3_CPMU_EEEMD_LPI_IN_TX |
7912 TG3_CPMU_EEEMD_LPI_IN_RX |
7913 TG3_CPMU_EEEMD_EEE_ENABLE;
7915 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7916 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
7918 if (tg3_flag(tp, ENABLE_APE))
7919 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
7921 tw32_f(TG3_CPMU_EEE_MODE, val);
7923 tw32_f(TG3_CPMU_EEE_DBTMR1,
7924 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
7925 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
7927 tw32_f(TG3_CPMU_EEE_DBTMR2,
7928 TG3_CPMU_DBTMR2_APE_TX_2047US |
7929 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
7935 err = tg3_chip_reset(tp);
7939 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7941 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7942 val = tr32(TG3_CPMU_CTRL);
7943 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7944 tw32(TG3_CPMU_CTRL, val);
7946 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7947 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7948 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7949 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7951 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7952 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7953 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7954 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7956 val = tr32(TG3_CPMU_HST_ACC);
7957 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7958 val |= CPMU_HST_ACC_MACCLK_6_25;
7959 tw32(TG3_CPMU_HST_ACC, val);
7962 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7963 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7964 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7965 PCIE_PWR_MGMT_L1_THRESH_4MS;
7966 tw32(PCIE_PWR_MGMT_THRESH, val);
7968 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7969 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7971 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7973 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7974 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7977 if (tg3_flag(tp, L1PLLPD_EN)) {
7978 u32 grc_mode = tr32(GRC_MODE);
7980 /* Access the lower 1K of PL PCIE block registers. */
7981 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7982 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7984 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
7985 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
7986 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
7988 tw32(GRC_MODE, grc_mode);
7991 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7992 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7993 u32 grc_mode = tr32(GRC_MODE);
7995 /* Access the lower 1K of PL PCIE block registers. */
7996 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7997 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7999 val = tr32(TG3_PCIE_TLDLPL_PORT +
8000 TG3_PCIE_PL_LO_PHYCTL5);
8001 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8002 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8004 tw32(GRC_MODE, grc_mode);
8007 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8008 u32 grc_mode = tr32(GRC_MODE);
8010 /* Access the lower 1K of DL PCIE block registers. */
8011 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8012 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8014 val = tr32(TG3_PCIE_TLDLPL_PORT +
8015 TG3_PCIE_DL_LO_FTSMAX);
8016 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8017 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8018 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8020 tw32(GRC_MODE, grc_mode);
8023 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8024 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8025 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8026 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8029 /* This works around an issue with Athlon chipsets on
8030 * B3 tigon3 silicon. This bit has no effect on any
8031 * other revision. But do not set this on PCI Express
8032 * chips and don't even touch the clocks if the CPMU is present.
8034 if (!tg3_flag(tp, CPMU_PRESENT)) {
8035 if (!tg3_flag(tp, PCI_EXPRESS))
8036 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8037 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8040 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8041 tg3_flag(tp, PCIX_MODE)) {
8042 val = tr32(TG3PCI_PCISTATE);
8043 val |= PCISTATE_RETRY_SAME_DMA;
8044 tw32(TG3PCI_PCISTATE, val);
8047 if (tg3_flag(tp, ENABLE_APE)) {
8048 /* Allow reads and writes to the
8049 * APE register and memory space.
8051 val = tr32(TG3PCI_PCISTATE);
8052 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8053 PCISTATE_ALLOW_APE_SHMEM_WR |
8054 PCISTATE_ALLOW_APE_PSPACE_WR;
8055 tw32(TG3PCI_PCISTATE, val);
8058 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8059 /* Enable some hw fixes. */
8060 val = tr32(TG3PCI_MSI_DATA);
8061 val |= (1 << 26) | (1 << 28) | (1 << 29);
8062 tw32(TG3PCI_MSI_DATA, val);
8065 /* Descriptor ring init may make accesses to the
8066 * NIC SRAM area to setup the TX descriptors, so we
8067 * can only do this after the hardware has been
8068 * successfully reset.
8070 err = tg3_init_rings(tp);
8074 if (tg3_flag(tp, 57765_PLUS)) {
8075 val = tr32(TG3PCI_DMA_RW_CTRL) &
8076 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8077 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8078 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8079 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8080 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8081 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8082 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8083 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8084 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8085 /* This value is determined during the probe time DMA
8086 * engine test, tg3_test_dma.
8088 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8091 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8092 GRC_MODE_4X_NIC_SEND_RINGS |
8093 GRC_MODE_NO_TX_PHDR_CSUM |
8094 GRC_MODE_NO_RX_PHDR_CSUM);
8095 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8097 /* Pseudo-header checksum is done by hardware logic and not
8098 * the offload processers, so make the chip do the pseudo-
8099 * header checksums on receive. For transmit it is more
8100 * convenient to do the pseudo-header checksum in software
8101 * as Linux does that on transmit for us in all cases.
8103 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8107 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8109 /* Setup the timer prescalar register. Clock is always 66Mhz. */
8110 val = tr32(GRC_MISC_CFG);
8112 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8113 tw32(GRC_MISC_CFG, val);
8115 /* Initialize MBUF/DESC pool. */
8116 if (tg3_flag(tp, 5750_PLUS)) {
8118 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8119 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8120 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8121 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8123 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8124 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8125 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8126 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8129 fw_len = tp->fw_len;
8130 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8131 tw32(BUFMGR_MB_POOL_ADDR,
8132 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8133 tw32(BUFMGR_MB_POOL_SIZE,
8134 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8137 if (tp->dev->mtu <= ETH_DATA_LEN) {
8138 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8139 tp->bufmgr_config.mbuf_read_dma_low_water);
8140 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8141 tp->bufmgr_config.mbuf_mac_rx_low_water);
8142 tw32(BUFMGR_MB_HIGH_WATER,
8143 tp->bufmgr_config.mbuf_high_water);
8145 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8146 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8147 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8148 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8149 tw32(BUFMGR_MB_HIGH_WATER,
8150 tp->bufmgr_config.mbuf_high_water_jumbo);
8152 tw32(BUFMGR_DMA_LOW_WATER,
8153 tp->bufmgr_config.dma_low_water);
8154 tw32(BUFMGR_DMA_HIGH_WATER,
8155 tp->bufmgr_config.dma_high_water);
8157 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8158 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8159 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8160 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8161 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8162 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8163 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8164 tw32(BUFMGR_MODE, val);
8165 for (i = 0; i < 2000; i++) {
8166 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8171 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8175 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8176 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8178 tg3_setup_rxbd_thresholds(tp);
8180 /* Initialize TG3_BDINFO's at:
8181 * RCVDBDI_STD_BD: standard eth size rx ring
8182 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8183 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8186 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8187 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8188 * ring attribute flags
8189 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8191 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8192 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8194 * The size of each ring is fixed in the firmware, but the location is
8197 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8198 ((u64) tpr->rx_std_mapping >> 32));
8199 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8200 ((u64) tpr->rx_std_mapping & 0xffffffff));
8201 if (!tg3_flag(tp, 5717_PLUS))
8202 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8203 NIC_SRAM_RX_BUFFER_DESC);
8205 /* Disable the mini ring */
8206 if (!tg3_flag(tp, 5705_PLUS))
8207 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8208 BDINFO_FLAGS_DISABLED);
8210 /* Program the jumbo buffer descriptor ring control
8211 * blocks on those devices that have them.
8213 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8214 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8216 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8217 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8218 ((u64) tpr->rx_jmb_mapping >> 32));
8219 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8220 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8221 val = TG3_RX_JMB_RING_SIZE(tp) <<
8222 BDINFO_FLAGS_MAXLEN_SHIFT;
8223 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8224 val | BDINFO_FLAGS_USE_EXT_RECV);
8225 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8226 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8227 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8228 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8230 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8231 BDINFO_FLAGS_DISABLED);
8234 if (tg3_flag(tp, 57765_PLUS)) {
8235 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8236 val = TG3_RX_STD_MAX_SIZE_5700;
8238 val = TG3_RX_STD_MAX_SIZE_5717;
8239 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8240 val |= (TG3_RX_STD_DMA_SZ << 2);
8242 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8244 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8246 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8248 tpr->rx_std_prod_idx = tp->rx_pending;
8249 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8251 tpr->rx_jmb_prod_idx =
8252 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8253 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8255 tg3_rings_reset(tp);
8257 /* Initialize MAC address and backoff seed. */
8258 __tg3_set_mac_addr(tp, 0);
8260 /* MTU + ethernet header + FCS + optional VLAN tag */
8261 tw32(MAC_RX_MTU_SIZE,
8262 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8264 /* The slot time is changed by tg3_setup_phy if we
8265 * run at gigabit with half duplex.
8267 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8268 (6 << TX_LENGTHS_IPG_SHIFT) |
8269 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8271 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8272 val |= tr32(MAC_TX_LENGTHS) &
8273 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8274 TX_LENGTHS_CNT_DWN_VAL_MSK);
8276 tw32(MAC_TX_LENGTHS, val);
8278 /* Receive rules. */
8279 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8280 tw32(RCVLPC_CONFIG, 0x0181);
8282 /* Calculate RDMAC_MODE setting early, we need it to determine
8283 * the RCVLPC_STATE_ENABLE mask.
8285 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8286 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8287 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8288 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8289 RDMAC_MODE_LNGREAD_ENAB);
8291 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8292 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8294 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8295 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8296 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8297 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8298 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8299 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8301 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8302 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8303 if (tg3_flag(tp, TSO_CAPABLE) &&
8304 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8305 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8306 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8307 !tg3_flag(tp, IS_5788)) {
8308 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8312 if (tg3_flag(tp, PCI_EXPRESS))
8313 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8315 if (tg3_flag(tp, HW_TSO_1) ||
8316 tg3_flag(tp, HW_TSO_2) ||
8317 tg3_flag(tp, HW_TSO_3))
8318 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8320 if (tg3_flag(tp, 57765_PLUS) ||
8321 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8322 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8323 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8325 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8326 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8328 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8329 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8330 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8331 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8332 tg3_flag(tp, 57765_PLUS)) {
8333 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8334 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8335 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8336 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8337 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8338 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8339 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8340 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8341 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8343 tw32(TG3_RDMA_RSRVCTRL_REG,
8344 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8347 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8348 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8349 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8350 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8351 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8352 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8355 /* Receive/send statistics. */
8356 if (tg3_flag(tp, 5750_PLUS)) {
8357 val = tr32(RCVLPC_STATS_ENABLE);
8358 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8359 tw32(RCVLPC_STATS_ENABLE, val);
8360 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8361 tg3_flag(tp, TSO_CAPABLE)) {
8362 val = tr32(RCVLPC_STATS_ENABLE);
8363 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8364 tw32(RCVLPC_STATS_ENABLE, val);
8366 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8368 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8369 tw32(SNDDATAI_STATSENAB, 0xffffff);
8370 tw32(SNDDATAI_STATSCTRL,
8371 (SNDDATAI_SCTRL_ENABLE |
8372 SNDDATAI_SCTRL_FASTUPD));
8374 /* Setup host coalescing engine. */
8375 tw32(HOSTCC_MODE, 0);
8376 for (i = 0; i < 2000; i++) {
8377 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8382 __tg3_set_coalesce(tp, &tp->coal);
8384 if (!tg3_flag(tp, 5705_PLUS)) {
8385 /* Status/statistics block address. See tg3_timer,
8386 * the tg3_periodic_fetch_stats call there, and
8387 * tg3_get_stats to see how this works for 5705/5750 chips.
8389 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8390 ((u64) tp->stats_mapping >> 32));
8391 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8392 ((u64) tp->stats_mapping & 0xffffffff));
8393 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8395 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8397 /* Clear statistics and status block memory areas */
8398 for (i = NIC_SRAM_STATS_BLK;
8399 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8401 tg3_write_mem(tp, i, 0);
8406 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8408 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8409 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8410 if (!tg3_flag(tp, 5705_PLUS))
8411 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8413 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8414 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8415 /* reset to prevent losing 1st rx packet intermittently */
8416 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8420 if (tg3_flag(tp, ENABLE_APE))
8421 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8424 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8425 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8426 if (!tg3_flag(tp, 5705_PLUS) &&
8427 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8428 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8429 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8430 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8433 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8434 * If TG3_FLAG_IS_NIC is zero, we should read the
8435 * register to preserve the GPIO settings for LOMs. The GPIOs,
8436 * whether used as inputs or outputs, are set by boot code after
8439 if (!tg3_flag(tp, IS_NIC)) {
8442 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8443 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8444 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8446 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8447 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8448 GRC_LCLCTRL_GPIO_OUTPUT3;
8450 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8451 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8453 tp->grc_local_ctrl &= ~gpio_mask;
8454 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8456 /* GPIO1 must be driven high for eeprom write protect */
8457 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8458 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8459 GRC_LCLCTRL_GPIO_OUTPUT1);
8461 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8464 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8465 val = tr32(MSGINT_MODE);
8466 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8467 tw32(MSGINT_MODE, val);
8470 if (!tg3_flag(tp, 5705_PLUS)) {
8471 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8475 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8476 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8477 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8478 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8479 WDMAC_MODE_LNGREAD_ENAB);
8481 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8482 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8483 if (tg3_flag(tp, TSO_CAPABLE) &&
8484 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8485 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8487 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8488 !tg3_flag(tp, IS_5788)) {
8489 val |= WDMAC_MODE_RX_ACCEL;
8493 /* Enable host coalescing bug fix */
8494 if (tg3_flag(tp, 5755_PLUS))
8495 val |= WDMAC_MODE_STATUS_TAG_FIX;
8497 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8498 val |= WDMAC_MODE_BURST_ALL_DATA;
8500 tw32_f(WDMAC_MODE, val);
8503 if (tg3_flag(tp, PCIX_MODE)) {
8506 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8508 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8509 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8510 pcix_cmd |= PCI_X_CMD_READ_2K;
8511 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8512 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8513 pcix_cmd |= PCI_X_CMD_READ_2K;
8515 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8519 tw32_f(RDMAC_MODE, rdmac_mode);
8522 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8523 if (!tg3_flag(tp, 5705_PLUS))
8524 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8526 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8528 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8530 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8532 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8533 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8534 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8535 if (tg3_flag(tp, LRG_PROD_RING_CAP))
8536 val |= RCVDBDI_MODE_LRG_RING_SZ;
8537 tw32(RCVDBDI_MODE, val);
8538 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8539 if (tg3_flag(tp, HW_TSO_1) ||
8540 tg3_flag(tp, HW_TSO_2) ||
8541 tg3_flag(tp, HW_TSO_3))
8542 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8543 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8544 if (tg3_flag(tp, ENABLE_TSS))
8545 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8546 tw32(SNDBDI_MODE, val);
8547 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8549 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8550 err = tg3_load_5701_a0_firmware_fix(tp);
8555 if (tg3_flag(tp, TSO_CAPABLE)) {
8556 err = tg3_load_tso_firmware(tp);
8561 tp->tx_mode = TX_MODE_ENABLE;
8563 if (tg3_flag(tp, 5755_PLUS) ||
8564 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8565 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8567 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8568 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8569 tp->tx_mode &= ~val;
8570 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8573 tw32_f(MAC_TX_MODE, tp->tx_mode);
8576 if (tg3_flag(tp, ENABLE_RSS)) {
8577 u32 reg = MAC_RSS_INDIR_TBL_0;
8578 u8 *ent = (u8 *)&val;
8580 /* Setup the indirection table */
8581 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8582 int idx = i % sizeof(val);
8584 ent[idx] = i % (tp->irq_cnt - 1);
8585 if (idx == sizeof(val) - 1) {
8591 /* Setup the "secret" hash key. */
8592 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8593 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8594 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8595 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8596 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8597 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8598 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8599 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8600 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8601 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8604 tp->rx_mode = RX_MODE_ENABLE;
8605 if (tg3_flag(tp, 5755_PLUS))
8606 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8608 if (tg3_flag(tp, ENABLE_RSS))
8609 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8610 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8611 RX_MODE_RSS_IPV6_HASH_EN |
8612 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8613 RX_MODE_RSS_IPV4_HASH_EN |
8614 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8616 tw32_f(MAC_RX_MODE, tp->rx_mode);
8619 tw32(MAC_LED_CTRL, tp->led_ctrl);
8621 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8622 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8623 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8626 tw32_f(MAC_RX_MODE, tp->rx_mode);
8629 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8630 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8631 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8632 /* Set drive transmission level to 1.2V */
8633 /* only if the signal pre-emphasis bit is not set */
8634 val = tr32(MAC_SERDES_CFG);
8637 tw32(MAC_SERDES_CFG, val);
8639 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8640 tw32(MAC_SERDES_CFG, 0x616000);
8643 /* Prevent chip from dropping frames when flow control
8646 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8650 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8652 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8653 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8654 /* Use hardware link auto-negotiation */
8655 tg3_flag_set(tp, HW_AUTONEG);
8658 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8659 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
8662 tmp = tr32(SERDES_RX_CTRL);
8663 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8664 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8665 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8666 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8669 if (!tg3_flag(tp, USE_PHYLIB)) {
8670 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8671 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8672 tp->link_config.speed = tp->link_config.orig_speed;
8673 tp->link_config.duplex = tp->link_config.orig_duplex;
8674 tp->link_config.autoneg = tp->link_config.orig_autoneg;
8677 err = tg3_setup_phy(tp, 0);
8681 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8682 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8685 /* Clear CRC stats. */
8686 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8687 tg3_writephy(tp, MII_TG3_TEST1,
8688 tmp | MII_TG3_TEST1_CRC_EN);
8689 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8694 __tg3_set_rx_mode(tp->dev);
8696 /* Initialize receive rules. */
8697 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
8698 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8699 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
8700 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8702 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8706 if (tg3_flag(tp, ENABLE_ASF))
8710 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
8712 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
8714 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
8716 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
8718 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
8720 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
8722 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
8724 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
8726 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
8728 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
8730 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
8732 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
8734 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
8736 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
8744 if (tg3_flag(tp, ENABLE_APE))
8745 /* Write our heartbeat update interval to APE. */
8746 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8747 APE_HOST_HEARTBEAT_INT_DISABLE);
8749 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8754 /* Called at device open time to get the chip ready for
8755 * packet processing. Invoked with tp->lock held.
8757 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8759 tg3_switch_clocks(tp);
8761 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8763 return tg3_reset_hw(tp, reset_phy);
8766 #define TG3_STAT_ADD32(PSTAT, REG) \
8767 do { u32 __val = tr32(REG); \
8768 (PSTAT)->low += __val; \
8769 if ((PSTAT)->low < __val) \
8770 (PSTAT)->high += 1; \
8773 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8775 struct tg3_hw_stats *sp = tp->hw_stats;
8777 if (!netif_carrier_ok(tp->dev))
8780 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8781 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8782 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8783 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8784 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8785 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8786 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8787 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8788 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8789 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8790 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8791 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8792 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8794 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8795 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8796 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8797 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8798 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8799 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8800 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8801 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8802 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8803 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8804 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8805 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8806 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8807 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8809 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8810 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
8811 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
8812 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
8813 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8815 u32 val = tr32(HOSTCC_FLOW_ATTN);
8816 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
8818 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
8819 sp->rx_discards.low += val;
8820 if (sp->rx_discards.low < val)
8821 sp->rx_discards.high += 1;
8823 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
8825 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8828 static void tg3_chk_missed_msi(struct tg3 *tp)
8832 for (i = 0; i < tp->irq_cnt; i++) {
8833 struct tg3_napi *tnapi = &tp->napi[i];
8835 if (tg3_has_work(tnapi)) {
8836 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
8837 tnapi->last_tx_cons == tnapi->tx_cons) {
8838 if (tnapi->chk_msi_cnt < 1) {
8839 tnapi->chk_msi_cnt++;
8842 tw32_mailbox(tnapi->int_mbox,
8843 tnapi->last_tag << 24);
8846 tnapi->chk_msi_cnt = 0;
8847 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
8848 tnapi->last_tx_cons = tnapi->tx_cons;
8852 static void tg3_timer(unsigned long __opaque)
8854 struct tg3 *tp = (struct tg3 *) __opaque;
8859 spin_lock(&tp->lock);
8861 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8862 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8863 tg3_chk_missed_msi(tp);
8865 if (!tg3_flag(tp, TAGGED_STATUS)) {
8866 /* All of this garbage is because when using non-tagged
8867 * IRQ status the mailbox/status_block protocol the chip
8868 * uses with the cpu is race prone.
8870 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8871 tw32(GRC_LOCAL_CTRL,
8872 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
8874 tw32(HOSTCC_MODE, tp->coalesce_mode |
8875 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
8878 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8879 tg3_flag_set(tp, RESTART_TIMER);
8880 spin_unlock(&tp->lock);
8881 schedule_work(&tp->reset_task);
8886 /* This part only runs once per second. */
8887 if (!--tp->timer_counter) {
8888 if (tg3_flag(tp, 5705_PLUS))
8889 tg3_periodic_fetch_stats(tp);
8891 if (tp->setlpicnt && !--tp->setlpicnt)
8892 tg3_phy_eee_enable(tp);
8894 if (tg3_flag(tp, USE_LINKCHG_REG)) {
8898 mac_stat = tr32(MAC_STATUS);
8901 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
8902 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8904 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
8908 tg3_setup_phy(tp, 0);
8909 } else if (tg3_flag(tp, POLL_SERDES)) {
8910 u32 mac_stat = tr32(MAC_STATUS);
8913 if (netif_carrier_ok(tp->dev) &&
8914 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
8917 if (!netif_carrier_ok(tp->dev) &&
8918 (mac_stat & (MAC_STATUS_PCS_SYNCED |
8919 MAC_STATUS_SIGNAL_DET))) {
8923 if (!tp->serdes_counter) {
8926 ~MAC_MODE_PORT_MODE_MASK));
8928 tw32_f(MAC_MODE, tp->mac_mode);
8931 tg3_setup_phy(tp, 0);
8933 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8934 tg3_flag(tp, 5780_CLASS)) {
8935 tg3_serdes_parallel_detect(tp);
8938 tp->timer_counter = tp->timer_multiplier;
8941 /* Heartbeat is only sent once every 2 seconds.
8943 * The heartbeat is to tell the ASF firmware that the host
8944 * driver is still alive. In the event that the OS crashes,
8945 * ASF needs to reset the hardware to free up the FIFO space
8946 * that may be filled with rx packets destined for the host.
8947 * If the FIFO is full, ASF will no longer function properly.
8949 * Unintended resets have been reported on real time kernels
8950 * where the timer doesn't run on time. Netpoll will also have
8953 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8954 * to check the ring condition when the heartbeat is expiring
8955 * before doing the reset. This will prevent most unintended
8958 if (!--tp->asf_counter) {
8959 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
8960 tg3_wait_for_event_ack(tp);
8962 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8963 FWCMD_NICDRV_ALIVE3);
8964 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8965 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
8966 TG3_FW_UPDATE_TIMEOUT_SEC);
8968 tg3_generate_fw_event(tp);
8970 tp->asf_counter = tp->asf_multiplier;
8973 spin_unlock(&tp->lock);
8976 tp->timer.expires = jiffies + tp->timer_offset;
8977 add_timer(&tp->timer);
8980 static int tg3_request_irq(struct tg3 *tp, int irq_num)
8983 unsigned long flags;
8985 struct tg3_napi *tnapi = &tp->napi[irq_num];
8987 if (tp->irq_cnt == 1)
8988 name = tp->dev->name;
8990 name = &tnapi->irq_lbl[0];
8991 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
8992 name[IFNAMSIZ-1] = 0;
8995 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
8997 if (tg3_flag(tp, 1SHOT_MSI))
9002 if (tg3_flag(tp, TAGGED_STATUS))
9003 fn = tg3_interrupt_tagged;
9004 flags = IRQF_SHARED;
9007 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9010 static int tg3_test_interrupt(struct tg3 *tp)
9012 struct tg3_napi *tnapi = &tp->napi[0];
9013 struct net_device *dev = tp->dev;
9014 int err, i, intr_ok = 0;
9017 if (!netif_running(dev))
9020 tg3_disable_ints(tp);
9022 free_irq(tnapi->irq_vec, tnapi);
9025 * Turn off MSI one shot mode. Otherwise this test has no
9026 * observable way to know whether the interrupt was delivered.
9028 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9029 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9030 tw32(MSGINT_MODE, val);
9033 err = request_irq(tnapi->irq_vec, tg3_test_isr,
9034 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9038 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9039 tg3_enable_ints(tp);
9041 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9044 for (i = 0; i < 5; i++) {
9045 u32 int_mbox, misc_host_ctrl;
9047 int_mbox = tr32_mailbox(tnapi->int_mbox);
9048 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9050 if ((int_mbox != 0) ||
9051 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9059 tg3_disable_ints(tp);
9061 free_irq(tnapi->irq_vec, tnapi);
9063 err = tg3_request_irq(tp, 0);
9069 /* Reenable MSI one shot mode. */
9070 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9071 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9072 tw32(MSGINT_MODE, val);
9080 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
9081 * successfully restored
9083 static int tg3_test_msi(struct tg3 *tp)
9088 if (!tg3_flag(tp, USING_MSI))
9091 /* Turn off SERR reporting in case MSI terminates with Master
9094 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9095 pci_write_config_word(tp->pdev, PCI_COMMAND,
9096 pci_cmd & ~PCI_COMMAND_SERR);
9098 err = tg3_test_interrupt(tp);
9100 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9105 /* other failures */
9109 /* MSI test failed, go back to INTx mode */
9110 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9111 "to INTx mode. Please report this failure to the PCI "
9112 "maintainer and include system chipset information\n");
9114 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9116 pci_disable_msi(tp->pdev);
9118 tg3_flag_clear(tp, USING_MSI);
9119 tp->napi[0].irq_vec = tp->pdev->irq;
9121 err = tg3_request_irq(tp, 0);
9125 /* Need to reset the chip because the MSI cycle may have terminated
9126 * with Master Abort.
9128 tg3_full_lock(tp, 1);
9130 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9131 err = tg3_init_hw(tp, 1);
9133 tg3_full_unlock(tp);
9136 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9141 static int tg3_request_firmware(struct tg3 *tp)
9143 const __be32 *fw_data;
9145 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9146 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9151 fw_data = (void *)tp->fw->data;
9153 /* Firmware blob starts with version numbers, followed by
9154 * start address and _full_ length including BSS sections
9155 * (which must be longer than the actual data, of course
9158 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
9159 if (tp->fw_len < (tp->fw->size - 12)) {
9160 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9161 tp->fw_len, tp->fw_needed);
9162 release_firmware(tp->fw);
9167 /* We no longer need firmware; we have it. */
9168 tp->fw_needed = NULL;
9172 static bool tg3_enable_msix(struct tg3 *tp)
9174 int i, rc, cpus = num_online_cpus();
9175 struct msix_entry msix_ent[tp->irq_max];
9178 /* Just fallback to the simpler MSI mode. */
9182 * We want as many rx rings enabled as there are cpus.
9183 * The first MSIX vector only deals with link interrupts, etc,
9184 * so we add one to the number of vectors we are requesting.
9186 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9188 for (i = 0; i < tp->irq_max; i++) {
9189 msix_ent[i].entry = i;
9190 msix_ent[i].vector = 0;
9193 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9196 } else if (rc != 0) {
9197 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9199 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9204 for (i = 0; i < tp->irq_max; i++)
9205 tp->napi[i].irq_vec = msix_ent[i].vector;
9207 netif_set_real_num_tx_queues(tp->dev, 1);
9208 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9209 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9210 pci_disable_msix(tp->pdev);
9214 if (tp->irq_cnt > 1) {
9215 tg3_flag_set(tp, ENABLE_RSS);
9217 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9218 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9219 tg3_flag_set(tp, ENABLE_TSS);
9220 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9227 static void tg3_ints_init(struct tg3 *tp)
9229 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9230 !tg3_flag(tp, TAGGED_STATUS)) {
9231 /* All MSI supporting chips should support tagged
9232 * status. Assert that this is the case.
9234 netdev_warn(tp->dev,
9235 "MSI without TAGGED_STATUS? Not using MSI\n");
9239 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9240 tg3_flag_set(tp, USING_MSIX);
9241 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9242 tg3_flag_set(tp, USING_MSI);
9244 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9245 u32 msi_mode = tr32(MSGINT_MODE);
9246 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9247 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9248 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9251 if (!tg3_flag(tp, USING_MSIX)) {
9253 tp->napi[0].irq_vec = tp->pdev->irq;
9254 netif_set_real_num_tx_queues(tp->dev, 1);
9255 netif_set_real_num_rx_queues(tp->dev, 1);
9259 static void tg3_ints_fini(struct tg3 *tp)
9261 if (tg3_flag(tp, USING_MSIX))
9262 pci_disable_msix(tp->pdev);
9263 else if (tg3_flag(tp, USING_MSI))
9264 pci_disable_msi(tp->pdev);
9265 tg3_flag_clear(tp, USING_MSI);
9266 tg3_flag_clear(tp, USING_MSIX);
9267 tg3_flag_clear(tp, ENABLE_RSS);
9268 tg3_flag_clear(tp, ENABLE_TSS);
9271 static int tg3_open(struct net_device *dev)
9273 struct tg3 *tp = netdev_priv(dev);
9276 if (tp->fw_needed) {
9277 err = tg3_request_firmware(tp);
9278 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9282 netdev_warn(tp->dev, "TSO capability disabled\n");
9283 tg3_flag_clear(tp, TSO_CAPABLE);
9284 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9285 netdev_notice(tp->dev, "TSO capability restored\n");
9286 tg3_flag_set(tp, TSO_CAPABLE);
9290 netif_carrier_off(tp->dev);
9292 err = tg3_power_up(tp);
9296 tg3_full_lock(tp, 0);
9298 tg3_disable_ints(tp);
9299 tg3_flag_clear(tp, INIT_COMPLETE);
9301 tg3_full_unlock(tp);
9304 * Setup interrupts first so we know how
9305 * many NAPI resources to allocate
9309 /* The placement of this call is tied
9310 * to the setup and use of Host TX descriptors.
9312 err = tg3_alloc_consistent(tp);
9318 tg3_napi_enable(tp);
9320 for (i = 0; i < tp->irq_cnt; i++) {
9321 struct tg3_napi *tnapi = &tp->napi[i];
9322 err = tg3_request_irq(tp, i);
9324 for (i--; i >= 0; i--)
9325 free_irq(tnapi->irq_vec, tnapi);
9333 tg3_full_lock(tp, 0);
9335 err = tg3_init_hw(tp, 1);
9337 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9340 if (tg3_flag(tp, TAGGED_STATUS) &&
9341 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9342 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
9343 tp->timer_offset = HZ;
9345 tp->timer_offset = HZ / 10;
9347 BUG_ON(tp->timer_offset > HZ);
9348 tp->timer_counter = tp->timer_multiplier =
9349 (HZ / tp->timer_offset);
9350 tp->asf_counter = tp->asf_multiplier =
9351 ((HZ / tp->timer_offset) * 2);
9353 init_timer(&tp->timer);
9354 tp->timer.expires = jiffies + tp->timer_offset;
9355 tp->timer.data = (unsigned long) tp;
9356 tp->timer.function = tg3_timer;
9359 tg3_full_unlock(tp);
9364 if (tg3_flag(tp, USING_MSI)) {
9365 err = tg3_test_msi(tp);
9368 tg3_full_lock(tp, 0);
9369 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9371 tg3_full_unlock(tp);
9376 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9377 u32 val = tr32(PCIE_TRANSACTION_CFG);
9379 tw32(PCIE_TRANSACTION_CFG,
9380 val | PCIE_TRANS_CFG_1SHOT_MSI);
9386 tg3_full_lock(tp, 0);
9388 add_timer(&tp->timer);
9389 tg3_flag_set(tp, INIT_COMPLETE);
9390 tg3_enable_ints(tp);
9392 tg3_full_unlock(tp);
9394 netif_tx_start_all_queues(dev);
9397 * Reset loopback feature if it was turned on while the device was down
9398 * make sure that it's installed properly now.
9400 if (dev->features & NETIF_F_LOOPBACK)
9401 tg3_set_loopback(dev, dev->features);
9406 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9407 struct tg3_napi *tnapi = &tp->napi[i];
9408 free_irq(tnapi->irq_vec, tnapi);
9412 tg3_napi_disable(tp);
9414 tg3_free_consistent(tp);
9421 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9422 struct rtnl_link_stats64 *);
9423 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9425 static int tg3_close(struct net_device *dev)
9428 struct tg3 *tp = netdev_priv(dev);
9430 tg3_napi_disable(tp);
9431 cancel_work_sync(&tp->reset_task);
9433 netif_tx_stop_all_queues(dev);
9435 del_timer_sync(&tp->timer);
9439 tg3_full_lock(tp, 1);
9441 tg3_disable_ints(tp);
9443 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9445 tg3_flag_clear(tp, INIT_COMPLETE);
9447 tg3_full_unlock(tp);
9449 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9450 struct tg3_napi *tnapi = &tp->napi[i];
9451 free_irq(tnapi->irq_vec, tnapi);
9456 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9458 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9459 sizeof(tp->estats_prev));
9463 tg3_free_consistent(tp);
9467 netif_carrier_off(tp->dev);
9472 static inline u64 get_stat64(tg3_stat64_t *val)
9474 return ((u64)val->high << 32) | ((u64)val->low);
9477 static u64 calc_crc_errors(struct tg3 *tp)
9479 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9481 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9482 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9483 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9486 spin_lock_bh(&tp->lock);
9487 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9488 tg3_writephy(tp, MII_TG3_TEST1,
9489 val | MII_TG3_TEST1_CRC_EN);
9490 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9493 spin_unlock_bh(&tp->lock);
9495 tp->phy_crc_errors += val;
9497 return tp->phy_crc_errors;
9500 return get_stat64(&hw_stats->rx_fcs_errors);
9503 #define ESTAT_ADD(member) \
9504 estats->member = old_estats->member + \
9505 get_stat64(&hw_stats->member)
9507 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9509 struct tg3_ethtool_stats *estats = &tp->estats;
9510 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9511 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9516 ESTAT_ADD(rx_octets);
9517 ESTAT_ADD(rx_fragments);
9518 ESTAT_ADD(rx_ucast_packets);
9519 ESTAT_ADD(rx_mcast_packets);
9520 ESTAT_ADD(rx_bcast_packets);
9521 ESTAT_ADD(rx_fcs_errors);
9522 ESTAT_ADD(rx_align_errors);
9523 ESTAT_ADD(rx_xon_pause_rcvd);
9524 ESTAT_ADD(rx_xoff_pause_rcvd);
9525 ESTAT_ADD(rx_mac_ctrl_rcvd);
9526 ESTAT_ADD(rx_xoff_entered);
9527 ESTAT_ADD(rx_frame_too_long_errors);
9528 ESTAT_ADD(rx_jabbers);
9529 ESTAT_ADD(rx_undersize_packets);
9530 ESTAT_ADD(rx_in_length_errors);
9531 ESTAT_ADD(rx_out_length_errors);
9532 ESTAT_ADD(rx_64_or_less_octet_packets);
9533 ESTAT_ADD(rx_65_to_127_octet_packets);
9534 ESTAT_ADD(rx_128_to_255_octet_packets);
9535 ESTAT_ADD(rx_256_to_511_octet_packets);
9536 ESTAT_ADD(rx_512_to_1023_octet_packets);
9537 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9538 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9539 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9540 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9541 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9543 ESTAT_ADD(tx_octets);
9544 ESTAT_ADD(tx_collisions);
9545 ESTAT_ADD(tx_xon_sent);
9546 ESTAT_ADD(tx_xoff_sent);
9547 ESTAT_ADD(tx_flow_control);
9548 ESTAT_ADD(tx_mac_errors);
9549 ESTAT_ADD(tx_single_collisions);
9550 ESTAT_ADD(tx_mult_collisions);
9551 ESTAT_ADD(tx_deferred);
9552 ESTAT_ADD(tx_excessive_collisions);
9553 ESTAT_ADD(tx_late_collisions);
9554 ESTAT_ADD(tx_collide_2times);
9555 ESTAT_ADD(tx_collide_3times);
9556 ESTAT_ADD(tx_collide_4times);
9557 ESTAT_ADD(tx_collide_5times);
9558 ESTAT_ADD(tx_collide_6times);
9559 ESTAT_ADD(tx_collide_7times);
9560 ESTAT_ADD(tx_collide_8times);
9561 ESTAT_ADD(tx_collide_9times);
9562 ESTAT_ADD(tx_collide_10times);
9563 ESTAT_ADD(tx_collide_11times);
9564 ESTAT_ADD(tx_collide_12times);
9565 ESTAT_ADD(tx_collide_13times);
9566 ESTAT_ADD(tx_collide_14times);
9567 ESTAT_ADD(tx_collide_15times);
9568 ESTAT_ADD(tx_ucast_packets);
9569 ESTAT_ADD(tx_mcast_packets);
9570 ESTAT_ADD(tx_bcast_packets);
9571 ESTAT_ADD(tx_carrier_sense_errors);
9572 ESTAT_ADD(tx_discards);
9573 ESTAT_ADD(tx_errors);
9575 ESTAT_ADD(dma_writeq_full);
9576 ESTAT_ADD(dma_write_prioq_full);
9577 ESTAT_ADD(rxbds_empty);
9578 ESTAT_ADD(rx_discards);
9579 ESTAT_ADD(rx_errors);
9580 ESTAT_ADD(rx_threshold_hit);
9582 ESTAT_ADD(dma_readq_full);
9583 ESTAT_ADD(dma_read_prioq_full);
9584 ESTAT_ADD(tx_comp_queue_full);
9586 ESTAT_ADD(ring_set_send_prod_index);
9587 ESTAT_ADD(ring_status_update);
9588 ESTAT_ADD(nic_irqs);
9589 ESTAT_ADD(nic_avoided_irqs);
9590 ESTAT_ADD(nic_tx_threshold_hit);
9592 ESTAT_ADD(mbuf_lwm_thresh_hit);
9597 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9598 struct rtnl_link_stats64 *stats)
9600 struct tg3 *tp = netdev_priv(dev);
9601 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9602 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9607 stats->rx_packets = old_stats->rx_packets +
9608 get_stat64(&hw_stats->rx_ucast_packets) +
9609 get_stat64(&hw_stats->rx_mcast_packets) +
9610 get_stat64(&hw_stats->rx_bcast_packets);
9612 stats->tx_packets = old_stats->tx_packets +
9613 get_stat64(&hw_stats->tx_ucast_packets) +
9614 get_stat64(&hw_stats->tx_mcast_packets) +
9615 get_stat64(&hw_stats->tx_bcast_packets);
9617 stats->rx_bytes = old_stats->rx_bytes +
9618 get_stat64(&hw_stats->rx_octets);
9619 stats->tx_bytes = old_stats->tx_bytes +
9620 get_stat64(&hw_stats->tx_octets);
9622 stats->rx_errors = old_stats->rx_errors +
9623 get_stat64(&hw_stats->rx_errors);
9624 stats->tx_errors = old_stats->tx_errors +
9625 get_stat64(&hw_stats->tx_errors) +
9626 get_stat64(&hw_stats->tx_mac_errors) +
9627 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9628 get_stat64(&hw_stats->tx_discards);
9630 stats->multicast = old_stats->multicast +
9631 get_stat64(&hw_stats->rx_mcast_packets);
9632 stats->collisions = old_stats->collisions +
9633 get_stat64(&hw_stats->tx_collisions);
9635 stats->rx_length_errors = old_stats->rx_length_errors +
9636 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9637 get_stat64(&hw_stats->rx_undersize_packets);
9639 stats->rx_over_errors = old_stats->rx_over_errors +
9640 get_stat64(&hw_stats->rxbds_empty);
9641 stats->rx_frame_errors = old_stats->rx_frame_errors +
9642 get_stat64(&hw_stats->rx_align_errors);
9643 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9644 get_stat64(&hw_stats->tx_discards);
9645 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9646 get_stat64(&hw_stats->tx_carrier_sense_errors);
9648 stats->rx_crc_errors = old_stats->rx_crc_errors +
9649 calc_crc_errors(tp);
9651 stats->rx_missed_errors = old_stats->rx_missed_errors +
9652 get_stat64(&hw_stats->rx_discards);
9654 stats->rx_dropped = tp->rx_dropped;
9659 static inline u32 calc_crc(unsigned char *buf, int len)
9667 for (j = 0; j < len; j++) {
9670 for (k = 0; k < 8; k++) {
9683 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9685 /* accept or reject all multicast frames */
9686 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9687 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9688 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9689 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9692 static void __tg3_set_rx_mode(struct net_device *dev)
9694 struct tg3 *tp = netdev_priv(dev);
9697 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9698 RX_MODE_KEEP_VLAN_TAG);
9700 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9701 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9704 if (!tg3_flag(tp, ENABLE_ASF))
9705 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9708 if (dev->flags & IFF_PROMISC) {
9709 /* Promiscuous mode. */
9710 rx_mode |= RX_MODE_PROMISC;
9711 } else if (dev->flags & IFF_ALLMULTI) {
9712 /* Accept all multicast. */
9713 tg3_set_multi(tp, 1);
9714 } else if (netdev_mc_empty(dev)) {
9715 /* Reject all multicast. */
9716 tg3_set_multi(tp, 0);
9718 /* Accept one or more multicast(s). */
9719 struct netdev_hw_addr *ha;
9720 u32 mc_filter[4] = { 0, };
9725 netdev_for_each_mc_addr(ha, dev) {
9726 crc = calc_crc(ha->addr, ETH_ALEN);
9728 regidx = (bit & 0x60) >> 5;
9730 mc_filter[regidx] |= (1 << bit);
9733 tw32(MAC_HASH_REG_0, mc_filter[0]);
9734 tw32(MAC_HASH_REG_1, mc_filter[1]);
9735 tw32(MAC_HASH_REG_2, mc_filter[2]);
9736 tw32(MAC_HASH_REG_3, mc_filter[3]);
9739 if (rx_mode != tp->rx_mode) {
9740 tp->rx_mode = rx_mode;
9741 tw32_f(MAC_RX_MODE, rx_mode);
9746 static void tg3_set_rx_mode(struct net_device *dev)
9748 struct tg3 *tp = netdev_priv(dev);
9750 if (!netif_running(dev))
9753 tg3_full_lock(tp, 0);
9754 __tg3_set_rx_mode(dev);
9755 tg3_full_unlock(tp);
9758 static int tg3_get_regs_len(struct net_device *dev)
9760 return TG3_REG_BLK_SIZE;
9763 static void tg3_get_regs(struct net_device *dev,
9764 struct ethtool_regs *regs, void *_p)
9766 struct tg3 *tp = netdev_priv(dev);
9770 memset(_p, 0, TG3_REG_BLK_SIZE);
9772 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9775 tg3_full_lock(tp, 0);
9777 tg3_dump_legacy_regs(tp, (u32 *)_p);
9779 tg3_full_unlock(tp);
9782 static int tg3_get_eeprom_len(struct net_device *dev)
9784 struct tg3 *tp = netdev_priv(dev);
9786 return tp->nvram_size;
9789 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9791 struct tg3 *tp = netdev_priv(dev);
9794 u32 i, offset, len, b_offset, b_count;
9797 if (tg3_flag(tp, NO_NVRAM))
9800 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9803 offset = eeprom->offset;
9807 eeprom->magic = TG3_EEPROM_MAGIC;
9810 /* adjustments to start on required 4 byte boundary */
9811 b_offset = offset & 3;
9812 b_count = 4 - b_offset;
9813 if (b_count > len) {
9814 /* i.e. offset=1 len=2 */
9817 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9820 memcpy(data, ((char *)&val) + b_offset, b_count);
9823 eeprom->len += b_count;
9826 /* read bytes up to the last 4 byte boundary */
9827 pd = &data[eeprom->len];
9828 for (i = 0; i < (len - (len & 3)); i += 4) {
9829 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9834 memcpy(pd + i, &val, 4);
9839 /* read last bytes not ending on 4 byte boundary */
9840 pd = &data[eeprom->len];
9842 b_offset = offset + len - b_count;
9843 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9846 memcpy(pd, &val, b_count);
9847 eeprom->len += b_count;
9852 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9854 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9856 struct tg3 *tp = netdev_priv(dev);
9858 u32 offset, len, b_offset, odd_len;
9862 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9865 if (tg3_flag(tp, NO_NVRAM) ||
9866 eeprom->magic != TG3_EEPROM_MAGIC)
9869 offset = eeprom->offset;
9872 if ((b_offset = (offset & 3))) {
9873 /* adjustments to start on required 4 byte boundary */
9874 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9885 /* adjustments to end on required 4 byte boundary */
9887 len = (len + 3) & ~3;
9888 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9894 if (b_offset || odd_len) {
9895 buf = kmalloc(len, GFP_KERNEL);
9899 memcpy(buf, &start, 4);
9901 memcpy(buf+len-4, &end, 4);
9902 memcpy(buf + b_offset, data, eeprom->len);
9905 ret = tg3_nvram_write_block(tp, offset, len, buf);
9913 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9915 struct tg3 *tp = netdev_priv(dev);
9917 if (tg3_flag(tp, USE_PHYLIB)) {
9918 struct phy_device *phydev;
9919 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9921 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9922 return phy_ethtool_gset(phydev, cmd);
9925 cmd->supported = (SUPPORTED_Autoneg);
9927 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9928 cmd->supported |= (SUPPORTED_1000baseT_Half |
9929 SUPPORTED_1000baseT_Full);
9931 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
9932 cmd->supported |= (SUPPORTED_100baseT_Half |
9933 SUPPORTED_100baseT_Full |
9934 SUPPORTED_10baseT_Half |
9935 SUPPORTED_10baseT_Full |
9937 cmd->port = PORT_TP;
9939 cmd->supported |= SUPPORTED_FIBRE;
9940 cmd->port = PORT_FIBRE;
9943 cmd->advertising = tp->link_config.advertising;
9944 if (netif_running(dev)) {
9945 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
9946 cmd->duplex = tp->link_config.active_duplex;
9948 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
9949 cmd->duplex = DUPLEX_INVALID;
9951 cmd->phy_address = tp->phy_addr;
9952 cmd->transceiver = XCVR_INTERNAL;
9953 cmd->autoneg = tp->link_config.autoneg;
9959 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9961 struct tg3 *tp = netdev_priv(dev);
9962 u32 speed = ethtool_cmd_speed(cmd);
9964 if (tg3_flag(tp, USE_PHYLIB)) {
9965 struct phy_device *phydev;
9966 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9968 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9969 return phy_ethtool_sset(phydev, cmd);
9972 if (cmd->autoneg != AUTONEG_ENABLE &&
9973 cmd->autoneg != AUTONEG_DISABLE)
9976 if (cmd->autoneg == AUTONEG_DISABLE &&
9977 cmd->duplex != DUPLEX_FULL &&
9978 cmd->duplex != DUPLEX_HALF)
9981 if (cmd->autoneg == AUTONEG_ENABLE) {
9982 u32 mask = ADVERTISED_Autoneg |
9984 ADVERTISED_Asym_Pause;
9986 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9987 mask |= ADVERTISED_1000baseT_Half |
9988 ADVERTISED_1000baseT_Full;
9990 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
9991 mask |= ADVERTISED_100baseT_Half |
9992 ADVERTISED_100baseT_Full |
9993 ADVERTISED_10baseT_Half |
9994 ADVERTISED_10baseT_Full |
9997 mask |= ADVERTISED_FIBRE;
9999 if (cmd->advertising & ~mask)
10002 mask &= (ADVERTISED_1000baseT_Half |
10003 ADVERTISED_1000baseT_Full |
10004 ADVERTISED_100baseT_Half |
10005 ADVERTISED_100baseT_Full |
10006 ADVERTISED_10baseT_Half |
10007 ADVERTISED_10baseT_Full);
10009 cmd->advertising &= mask;
10011 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10012 if (speed != SPEED_1000)
10015 if (cmd->duplex != DUPLEX_FULL)
10018 if (speed != SPEED_100 &&
10024 tg3_full_lock(tp, 0);
10026 tp->link_config.autoneg = cmd->autoneg;
10027 if (cmd->autoneg == AUTONEG_ENABLE) {
10028 tp->link_config.advertising = (cmd->advertising |
10029 ADVERTISED_Autoneg);
10030 tp->link_config.speed = SPEED_INVALID;
10031 tp->link_config.duplex = DUPLEX_INVALID;
10033 tp->link_config.advertising = 0;
10034 tp->link_config.speed = speed;
10035 tp->link_config.duplex = cmd->duplex;
10038 tp->link_config.orig_speed = tp->link_config.speed;
10039 tp->link_config.orig_duplex = tp->link_config.duplex;
10040 tp->link_config.orig_autoneg = tp->link_config.autoneg;
10042 if (netif_running(dev))
10043 tg3_setup_phy(tp, 1);
10045 tg3_full_unlock(tp);
10050 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10052 struct tg3 *tp = netdev_priv(dev);
10054 strcpy(info->driver, DRV_MODULE_NAME);
10055 strcpy(info->version, DRV_MODULE_VERSION);
10056 strcpy(info->fw_version, tp->fw_ver);
10057 strcpy(info->bus_info, pci_name(tp->pdev));
10060 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10062 struct tg3 *tp = netdev_priv(dev);
10064 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10065 wol->supported = WAKE_MAGIC;
10067 wol->supported = 0;
10069 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10070 wol->wolopts = WAKE_MAGIC;
10071 memset(&wol->sopass, 0, sizeof(wol->sopass));
10074 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10076 struct tg3 *tp = netdev_priv(dev);
10077 struct device *dp = &tp->pdev->dev;
10079 if (wol->wolopts & ~WAKE_MAGIC)
10081 if ((wol->wolopts & WAKE_MAGIC) &&
10082 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10085 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10087 spin_lock_bh(&tp->lock);
10088 if (device_may_wakeup(dp))
10089 tg3_flag_set(tp, WOL_ENABLE);
10091 tg3_flag_clear(tp, WOL_ENABLE);
10092 spin_unlock_bh(&tp->lock);
10097 static u32 tg3_get_msglevel(struct net_device *dev)
10099 struct tg3 *tp = netdev_priv(dev);
10100 return tp->msg_enable;
10103 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10105 struct tg3 *tp = netdev_priv(dev);
10106 tp->msg_enable = value;
10109 static int tg3_nway_reset(struct net_device *dev)
10111 struct tg3 *tp = netdev_priv(dev);
10114 if (!netif_running(dev))
10117 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10120 if (tg3_flag(tp, USE_PHYLIB)) {
10121 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10123 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10127 spin_lock_bh(&tp->lock);
10129 tg3_readphy(tp, MII_BMCR, &bmcr);
10130 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10131 ((bmcr & BMCR_ANENABLE) ||
10132 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10133 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10137 spin_unlock_bh(&tp->lock);
10143 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10145 struct tg3 *tp = netdev_priv(dev);
10147 ering->rx_max_pending = tp->rx_std_ring_mask;
10148 ering->rx_mini_max_pending = 0;
10149 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10150 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10152 ering->rx_jumbo_max_pending = 0;
10154 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10156 ering->rx_pending = tp->rx_pending;
10157 ering->rx_mini_pending = 0;
10158 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10159 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10161 ering->rx_jumbo_pending = 0;
10163 ering->tx_pending = tp->napi[0].tx_pending;
10166 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10168 struct tg3 *tp = netdev_priv(dev);
10169 int i, irq_sync = 0, err = 0;
10171 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10172 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10173 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10174 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10175 (tg3_flag(tp, TSO_BUG) &&
10176 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10179 if (netif_running(dev)) {
10181 tg3_netif_stop(tp);
10185 tg3_full_lock(tp, irq_sync);
10187 tp->rx_pending = ering->rx_pending;
10189 if (tg3_flag(tp, MAX_RXPEND_64) &&
10190 tp->rx_pending > 63)
10191 tp->rx_pending = 63;
10192 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10194 for (i = 0; i < tp->irq_max; i++)
10195 tp->napi[i].tx_pending = ering->tx_pending;
10197 if (netif_running(dev)) {
10198 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10199 err = tg3_restart_hw(tp, 1);
10201 tg3_netif_start(tp);
10204 tg3_full_unlock(tp);
10206 if (irq_sync && !err)
10212 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10214 struct tg3 *tp = netdev_priv(dev);
10216 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10218 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10219 epause->rx_pause = 1;
10221 epause->rx_pause = 0;
10223 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10224 epause->tx_pause = 1;
10226 epause->tx_pause = 0;
10229 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10231 struct tg3 *tp = netdev_priv(dev);
10234 if (tg3_flag(tp, USE_PHYLIB)) {
10236 struct phy_device *phydev;
10238 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10240 if (!(phydev->supported & SUPPORTED_Pause) ||
10241 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10242 (epause->rx_pause != epause->tx_pause)))
10245 tp->link_config.flowctrl = 0;
10246 if (epause->rx_pause) {
10247 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10249 if (epause->tx_pause) {
10250 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10251 newadv = ADVERTISED_Pause;
10253 newadv = ADVERTISED_Pause |
10254 ADVERTISED_Asym_Pause;
10255 } else if (epause->tx_pause) {
10256 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10257 newadv = ADVERTISED_Asym_Pause;
10261 if (epause->autoneg)
10262 tg3_flag_set(tp, PAUSE_AUTONEG);
10264 tg3_flag_clear(tp, PAUSE_AUTONEG);
10266 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10267 u32 oldadv = phydev->advertising &
10268 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10269 if (oldadv != newadv) {
10270 phydev->advertising &=
10271 ~(ADVERTISED_Pause |
10272 ADVERTISED_Asym_Pause);
10273 phydev->advertising |= newadv;
10274 if (phydev->autoneg) {
10276 * Always renegotiate the link to
10277 * inform our link partner of our
10278 * flow control settings, even if the
10279 * flow control is forced. Let
10280 * tg3_adjust_link() do the final
10281 * flow control setup.
10283 return phy_start_aneg(phydev);
10287 if (!epause->autoneg)
10288 tg3_setup_flow_control(tp, 0, 0);
10290 tp->link_config.orig_advertising &=
10291 ~(ADVERTISED_Pause |
10292 ADVERTISED_Asym_Pause);
10293 tp->link_config.orig_advertising |= newadv;
10298 if (netif_running(dev)) {
10299 tg3_netif_stop(tp);
10303 tg3_full_lock(tp, irq_sync);
10305 if (epause->autoneg)
10306 tg3_flag_set(tp, PAUSE_AUTONEG);
10308 tg3_flag_clear(tp, PAUSE_AUTONEG);
10309 if (epause->rx_pause)
10310 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10312 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10313 if (epause->tx_pause)
10314 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10316 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10318 if (netif_running(dev)) {
10319 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10320 err = tg3_restart_hw(tp, 1);
10322 tg3_netif_start(tp);
10325 tg3_full_unlock(tp);
10331 static int tg3_get_sset_count(struct net_device *dev, int sset)
10335 return TG3_NUM_TEST;
10337 return TG3_NUM_STATS;
10339 return -EOPNOTSUPP;
10343 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10345 switch (stringset) {
10347 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
10350 memcpy(buf, ðtool_test_keys, sizeof(ethtool_test_keys));
10353 WARN_ON(1); /* we need a WARN() */
10358 static int tg3_set_phys_id(struct net_device *dev,
10359 enum ethtool_phys_id_state state)
10361 struct tg3 *tp = netdev_priv(dev);
10363 if (!netif_running(tp->dev))
10367 case ETHTOOL_ID_ACTIVE:
10368 return 1; /* cycle on/off once per second */
10370 case ETHTOOL_ID_ON:
10371 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10372 LED_CTRL_1000MBPS_ON |
10373 LED_CTRL_100MBPS_ON |
10374 LED_CTRL_10MBPS_ON |
10375 LED_CTRL_TRAFFIC_OVERRIDE |
10376 LED_CTRL_TRAFFIC_BLINK |
10377 LED_CTRL_TRAFFIC_LED);
10380 case ETHTOOL_ID_OFF:
10381 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10382 LED_CTRL_TRAFFIC_OVERRIDE);
10385 case ETHTOOL_ID_INACTIVE:
10386 tw32(MAC_LED_CTRL, tp->led_ctrl);
10393 static void tg3_get_ethtool_stats(struct net_device *dev,
10394 struct ethtool_stats *estats, u64 *tmp_stats)
10396 struct tg3 *tp = netdev_priv(dev);
10397 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10400 static __be32 * tg3_vpd_readblock(struct tg3 *tp)
10404 u32 offset = 0, len = 0;
10407 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10410 if (magic == TG3_EEPROM_MAGIC) {
10411 for (offset = TG3_NVM_DIR_START;
10412 offset < TG3_NVM_DIR_END;
10413 offset += TG3_NVM_DIRENT_SIZE) {
10414 if (tg3_nvram_read(tp, offset, &val))
10417 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10418 TG3_NVM_DIRTYPE_EXTVPD)
10422 if (offset != TG3_NVM_DIR_END) {
10423 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10424 if (tg3_nvram_read(tp, offset + 4, &offset))
10427 offset = tg3_nvram_logical_addr(tp, offset);
10431 if (!offset || !len) {
10432 offset = TG3_NVM_VPD_OFF;
10433 len = TG3_NVM_VPD_LEN;
10436 buf = kmalloc(len, GFP_KERNEL);
10440 if (magic == TG3_EEPROM_MAGIC) {
10441 for (i = 0; i < len; i += 4) {
10442 /* The data is in little-endian format in NVRAM.
10443 * Use the big-endian read routines to preserve
10444 * the byte order as it exists in NVRAM.
10446 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10452 unsigned int pos = 0;
10454 ptr = (u8 *)&buf[0];
10455 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10456 cnt = pci_read_vpd(tp->pdev, pos,
10458 if (cnt == -ETIMEDOUT || cnt == -EINTR)
10474 #define NVRAM_TEST_SIZE 0x100
10475 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10476 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10477 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10478 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10479 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10481 static int tg3_test_nvram(struct tg3 *tp)
10485 int i, j, k, err = 0, size;
10487 if (tg3_flag(tp, NO_NVRAM))
10490 if (tg3_nvram_read(tp, 0, &magic) != 0)
10493 if (magic == TG3_EEPROM_MAGIC)
10494 size = NVRAM_TEST_SIZE;
10495 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10496 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10497 TG3_EEPROM_SB_FORMAT_1) {
10498 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10499 case TG3_EEPROM_SB_REVISION_0:
10500 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10502 case TG3_EEPROM_SB_REVISION_2:
10503 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10505 case TG3_EEPROM_SB_REVISION_3:
10506 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10513 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10514 size = NVRAM_SELFBOOT_HW_SIZE;
10518 buf = kmalloc(size, GFP_KERNEL);
10523 for (i = 0, j = 0; i < size; i += 4, j++) {
10524 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10531 /* Selfboot format */
10532 magic = be32_to_cpu(buf[0]);
10533 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10534 TG3_EEPROM_MAGIC_FW) {
10535 u8 *buf8 = (u8 *) buf, csum8 = 0;
10537 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10538 TG3_EEPROM_SB_REVISION_2) {
10539 /* For rev 2, the csum doesn't include the MBA. */
10540 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10542 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10545 for (i = 0; i < size; i++)
10558 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10559 TG3_EEPROM_MAGIC_HW) {
10560 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10561 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10562 u8 *buf8 = (u8 *) buf;
10564 /* Separate the parity bits and the data bytes. */
10565 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10566 if ((i == 0) || (i == 8)) {
10570 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10571 parity[k++] = buf8[i] & msk;
10573 } else if (i == 16) {
10577 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10578 parity[k++] = buf8[i] & msk;
10581 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10582 parity[k++] = buf8[i] & msk;
10585 data[j++] = buf8[i];
10589 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10590 u8 hw8 = hweight8(data[i]);
10592 if ((hw8 & 0x1) && parity[i])
10594 else if (!(hw8 & 0x1) && !parity[i])
10603 /* Bootstrap checksum at offset 0x10 */
10604 csum = calc_crc((unsigned char *) buf, 0x10);
10605 if (csum != le32_to_cpu(buf[0x10/4]))
10608 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10609 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10610 if (csum != le32_to_cpu(buf[0xfc/4]))
10615 buf = tg3_vpd_readblock(tp);
10619 i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
10620 PCI_VPD_LRDT_RO_DATA);
10622 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10626 if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
10629 i += PCI_VPD_LRDT_TAG_SIZE;
10630 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10631 PCI_VPD_RO_KEYWORD_CHKSUM);
10635 j += PCI_VPD_INFO_FLD_HDR_SIZE;
10637 for (i = 0; i <= j; i++)
10638 csum8 += ((u8 *)buf)[i];
10652 #define TG3_SERDES_TIMEOUT_SEC 2
10653 #define TG3_COPPER_TIMEOUT_SEC 6
10655 static int tg3_test_link(struct tg3 *tp)
10659 if (!netif_running(tp->dev))
10662 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10663 max = TG3_SERDES_TIMEOUT_SEC;
10665 max = TG3_COPPER_TIMEOUT_SEC;
10667 for (i = 0; i < max; i++) {
10668 if (netif_carrier_ok(tp->dev))
10671 if (msleep_interruptible(1000))
10678 /* Only test the commonly used registers */
10679 static int tg3_test_registers(struct tg3 *tp)
10681 int i, is_5705, is_5750;
10682 u32 offset, read_mask, write_mask, val, save_val, read_val;
10686 #define TG3_FL_5705 0x1
10687 #define TG3_FL_NOT_5705 0x2
10688 #define TG3_FL_NOT_5788 0x4
10689 #define TG3_FL_NOT_5750 0x8
10693 /* MAC Control Registers */
10694 { MAC_MODE, TG3_FL_NOT_5705,
10695 0x00000000, 0x00ef6f8c },
10696 { MAC_MODE, TG3_FL_5705,
10697 0x00000000, 0x01ef6b8c },
10698 { MAC_STATUS, TG3_FL_NOT_5705,
10699 0x03800107, 0x00000000 },
10700 { MAC_STATUS, TG3_FL_5705,
10701 0x03800100, 0x00000000 },
10702 { MAC_ADDR_0_HIGH, 0x0000,
10703 0x00000000, 0x0000ffff },
10704 { MAC_ADDR_0_LOW, 0x0000,
10705 0x00000000, 0xffffffff },
10706 { MAC_RX_MTU_SIZE, 0x0000,
10707 0x00000000, 0x0000ffff },
10708 { MAC_TX_MODE, 0x0000,
10709 0x00000000, 0x00000070 },
10710 { MAC_TX_LENGTHS, 0x0000,
10711 0x00000000, 0x00003fff },
10712 { MAC_RX_MODE, TG3_FL_NOT_5705,
10713 0x00000000, 0x000007fc },
10714 { MAC_RX_MODE, TG3_FL_5705,
10715 0x00000000, 0x000007dc },
10716 { MAC_HASH_REG_0, 0x0000,
10717 0x00000000, 0xffffffff },
10718 { MAC_HASH_REG_1, 0x0000,
10719 0x00000000, 0xffffffff },
10720 { MAC_HASH_REG_2, 0x0000,
10721 0x00000000, 0xffffffff },
10722 { MAC_HASH_REG_3, 0x0000,
10723 0x00000000, 0xffffffff },
10725 /* Receive Data and Receive BD Initiator Control Registers. */
10726 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10727 0x00000000, 0xffffffff },
10728 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10729 0x00000000, 0xffffffff },
10730 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10731 0x00000000, 0x00000003 },
10732 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10733 0x00000000, 0xffffffff },
10734 { RCVDBDI_STD_BD+0, 0x0000,
10735 0x00000000, 0xffffffff },
10736 { RCVDBDI_STD_BD+4, 0x0000,
10737 0x00000000, 0xffffffff },
10738 { RCVDBDI_STD_BD+8, 0x0000,
10739 0x00000000, 0xffff0002 },
10740 { RCVDBDI_STD_BD+0xc, 0x0000,
10741 0x00000000, 0xffffffff },
10743 /* Receive BD Initiator Control Registers. */
10744 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10745 0x00000000, 0xffffffff },
10746 { RCVBDI_STD_THRESH, TG3_FL_5705,
10747 0x00000000, 0x000003ff },
10748 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10749 0x00000000, 0xffffffff },
10751 /* Host Coalescing Control Registers. */
10752 { HOSTCC_MODE, TG3_FL_NOT_5705,
10753 0x00000000, 0x00000004 },
10754 { HOSTCC_MODE, TG3_FL_5705,
10755 0x00000000, 0x000000f6 },
10756 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10757 0x00000000, 0xffffffff },
10758 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10759 0x00000000, 0x000003ff },
10760 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10761 0x00000000, 0xffffffff },
10762 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10763 0x00000000, 0x000003ff },
10764 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10765 0x00000000, 0xffffffff },
10766 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10767 0x00000000, 0x000000ff },
10768 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10769 0x00000000, 0xffffffff },
10770 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10771 0x00000000, 0x000000ff },
10772 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10773 0x00000000, 0xffffffff },
10774 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10775 0x00000000, 0xffffffff },
10776 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10777 0x00000000, 0xffffffff },
10778 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10779 0x00000000, 0x000000ff },
10780 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10781 0x00000000, 0xffffffff },
10782 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10783 0x00000000, 0x000000ff },
10784 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10785 0x00000000, 0xffffffff },
10786 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10787 0x00000000, 0xffffffff },
10788 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10789 0x00000000, 0xffffffff },
10790 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10791 0x00000000, 0xffffffff },
10792 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10793 0x00000000, 0xffffffff },
10794 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10795 0xffffffff, 0x00000000 },
10796 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10797 0xffffffff, 0x00000000 },
10799 /* Buffer Manager Control Registers. */
10800 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10801 0x00000000, 0x007fff80 },
10802 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10803 0x00000000, 0x007fffff },
10804 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10805 0x00000000, 0x0000003f },
10806 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10807 0x00000000, 0x000001ff },
10808 { BUFMGR_MB_HIGH_WATER, 0x0000,
10809 0x00000000, 0x000001ff },
10810 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10811 0xffffffff, 0x00000000 },
10812 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10813 0xffffffff, 0x00000000 },
10815 /* Mailbox Registers */
10816 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10817 0x00000000, 0x000001ff },
10818 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10819 0x00000000, 0x000001ff },
10820 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10821 0x00000000, 0x000007ff },
10822 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10823 0x00000000, 0x000001ff },
10825 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10828 is_5705 = is_5750 = 0;
10829 if (tg3_flag(tp, 5705_PLUS)) {
10831 if (tg3_flag(tp, 5750_PLUS))
10835 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10836 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10839 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10842 if (tg3_flag(tp, IS_5788) &&
10843 (reg_tbl[i].flags & TG3_FL_NOT_5788))
10846 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10849 offset = (u32) reg_tbl[i].offset;
10850 read_mask = reg_tbl[i].read_mask;
10851 write_mask = reg_tbl[i].write_mask;
10853 /* Save the original register content */
10854 save_val = tr32(offset);
10856 /* Determine the read-only value. */
10857 read_val = save_val & read_mask;
10859 /* Write zero to the register, then make sure the read-only bits
10860 * are not changed and the read/write bits are all zeros.
10864 val = tr32(offset);
10866 /* Test the read-only and read/write bits. */
10867 if (((val & read_mask) != read_val) || (val & write_mask))
10870 /* Write ones to all the bits defined by RdMask and WrMask, then
10871 * make sure the read-only bits are not changed and the
10872 * read/write bits are all ones.
10874 tw32(offset, read_mask | write_mask);
10876 val = tr32(offset);
10878 /* Test the read-only bits. */
10879 if ((val & read_mask) != read_val)
10882 /* Test the read/write bits. */
10883 if ((val & write_mask) != write_mask)
10886 tw32(offset, save_val);
10892 if (netif_msg_hw(tp))
10893 netdev_err(tp->dev,
10894 "Register test failed at offset %x\n", offset);
10895 tw32(offset, save_val);
10899 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10901 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10905 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10906 for (j = 0; j < len; j += 4) {
10909 tg3_write_mem(tp, offset + j, test_pattern[i]);
10910 tg3_read_mem(tp, offset + j, &val);
10911 if (val != test_pattern[i])
10918 static int tg3_test_memory(struct tg3 *tp)
10920 static struct mem_entry {
10923 } mem_tbl_570x[] = {
10924 { 0x00000000, 0x00b50},
10925 { 0x00002000, 0x1c000},
10926 { 0xffffffff, 0x00000}
10927 }, mem_tbl_5705[] = {
10928 { 0x00000100, 0x0000c},
10929 { 0x00000200, 0x00008},
10930 { 0x00004000, 0x00800},
10931 { 0x00006000, 0x01000},
10932 { 0x00008000, 0x02000},
10933 { 0x00010000, 0x0e000},
10934 { 0xffffffff, 0x00000}
10935 }, mem_tbl_5755[] = {
10936 { 0x00000200, 0x00008},
10937 { 0x00004000, 0x00800},
10938 { 0x00006000, 0x00800},
10939 { 0x00008000, 0x02000},
10940 { 0x00010000, 0x0c000},
10941 { 0xffffffff, 0x00000}
10942 }, mem_tbl_5906[] = {
10943 { 0x00000200, 0x00008},
10944 { 0x00004000, 0x00400},
10945 { 0x00006000, 0x00400},
10946 { 0x00008000, 0x01000},
10947 { 0x00010000, 0x01000},
10948 { 0xffffffff, 0x00000}
10949 }, mem_tbl_5717[] = {
10950 { 0x00000200, 0x00008},
10951 { 0x00010000, 0x0a000},
10952 { 0x00020000, 0x13c00},
10953 { 0xffffffff, 0x00000}
10954 }, mem_tbl_57765[] = {
10955 { 0x00000200, 0x00008},
10956 { 0x00004000, 0x00800},
10957 { 0x00006000, 0x09800},
10958 { 0x00010000, 0x0a000},
10959 { 0xffffffff, 0x00000}
10961 struct mem_entry *mem_tbl;
10965 if (tg3_flag(tp, 5717_PLUS))
10966 mem_tbl = mem_tbl_5717;
10967 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
10968 mem_tbl = mem_tbl_57765;
10969 else if (tg3_flag(tp, 5755_PLUS))
10970 mem_tbl = mem_tbl_5755;
10971 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10972 mem_tbl = mem_tbl_5906;
10973 else if (tg3_flag(tp, 5705_PLUS))
10974 mem_tbl = mem_tbl_5705;
10976 mem_tbl = mem_tbl_570x;
10978 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10979 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
10987 #define TG3_MAC_LOOPBACK 0
10988 #define TG3_PHY_LOOPBACK 1
10989 #define TG3_TSO_LOOPBACK 2
10991 #define TG3_TSO_MSS 500
10993 #define TG3_TSO_IP_HDR_LEN 20
10994 #define TG3_TSO_TCP_HDR_LEN 20
10995 #define TG3_TSO_TCP_OPT_LEN 12
10997 static const u8 tg3_tso_header[] = {
10999 0x45, 0x00, 0x00, 0x00,
11000 0x00, 0x00, 0x40, 0x00,
11001 0x40, 0x06, 0x00, 0x00,
11002 0x0a, 0x00, 0x00, 0x01,
11003 0x0a, 0x00, 0x00, 0x02,
11004 0x0d, 0x00, 0xe0, 0x00,
11005 0x00, 0x00, 0x01, 0x00,
11006 0x00, 0x00, 0x02, 0x00,
11007 0x80, 0x10, 0x10, 0x00,
11008 0x14, 0x09, 0x00, 0x00,
11009 0x01, 0x01, 0x08, 0x0a,
11010 0x11, 0x11, 0x11, 0x11,
11011 0x11, 0x11, 0x11, 0x11,
11014 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
11016 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
11017 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11018 struct sk_buff *skb, *rx_skb;
11021 int num_pkts, tx_len, rx_len, i, err;
11022 struct tg3_rx_buffer_desc *desc;
11023 struct tg3_napi *tnapi, *rnapi;
11024 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11026 tnapi = &tp->napi[0];
11027 rnapi = &tp->napi[0];
11028 if (tp->irq_cnt > 1) {
11029 if (tg3_flag(tp, ENABLE_RSS))
11030 rnapi = &tp->napi[1];
11031 if (tg3_flag(tp, ENABLE_TSS))
11032 tnapi = &tp->napi[1];
11034 coal_now = tnapi->coal_now | rnapi->coal_now;
11036 if (loopback_mode == TG3_MAC_LOOPBACK) {
11037 /* HW errata - mac loopback fails in some cases on 5780.
11038 * Normal traffic and PHY loopback are not affected by
11039 * errata. Also, the MAC loopback test is deprecated for
11040 * all newer ASIC revisions.
11042 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11043 tg3_flag(tp, CPMU_PRESENT))
11046 mac_mode = tp->mac_mode &
11047 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11048 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
11049 if (!tg3_flag(tp, 5705_PLUS))
11050 mac_mode |= MAC_MODE_LINK_POLARITY;
11051 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
11052 mac_mode |= MAC_MODE_PORT_MODE_MII;
11054 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11055 tw32(MAC_MODE, mac_mode);
11057 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11058 tg3_phy_fet_toggle_apd(tp, false);
11059 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
11061 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
11063 tg3_phy_toggle_automdix(tp, 0);
11065 tg3_writephy(tp, MII_BMCR, val);
11068 mac_mode = tp->mac_mode &
11069 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11070 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11071 tg3_writephy(tp, MII_TG3_FET_PTEST,
11072 MII_TG3_FET_PTEST_FRC_TX_LINK |
11073 MII_TG3_FET_PTEST_FRC_TX_LOCK);
11074 /* The write needs to be flushed for the AC131 */
11075 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11076 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
11077 mac_mode |= MAC_MODE_PORT_MODE_MII;
11079 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11081 /* reset to prevent losing 1st rx packet intermittently */
11082 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
11083 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
11085 tw32_f(MAC_RX_MODE, tp->rx_mode);
11087 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
11088 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
11089 if (masked_phy_id == TG3_PHY_ID_BCM5401)
11090 mac_mode &= ~MAC_MODE_LINK_POLARITY;
11091 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
11092 mac_mode |= MAC_MODE_LINK_POLARITY;
11093 tg3_writephy(tp, MII_TG3_EXT_CTRL,
11094 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
11096 tw32(MAC_MODE, mac_mode);
11098 /* Wait for link */
11099 for (i = 0; i < 100; i++) {
11100 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11109 skb = netdev_alloc_skb(tp->dev, tx_len);
11113 tx_data = skb_put(skb, tx_len);
11114 memcpy(tx_data, tp->dev->dev_addr, 6);
11115 memset(tx_data + 6, 0x0, 8);
11117 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11119 if (loopback_mode == TG3_TSO_LOOPBACK) {
11120 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11122 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11123 TG3_TSO_TCP_OPT_LEN;
11125 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11126 sizeof(tg3_tso_header));
11129 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11130 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11132 /* Set the total length field in the IP header */
11133 iph->tot_len = htons((u16)(mss + hdr_len));
11135 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11136 TXD_FLAG_CPU_POST_DMA);
11138 if (tg3_flag(tp, HW_TSO_1) ||
11139 tg3_flag(tp, HW_TSO_2) ||
11140 tg3_flag(tp, HW_TSO_3)) {
11142 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11143 th = (struct tcphdr *)&tx_data[val];
11146 base_flags |= TXD_FLAG_TCPUDP_CSUM;
11148 if (tg3_flag(tp, HW_TSO_3)) {
11149 mss |= (hdr_len & 0xc) << 12;
11150 if (hdr_len & 0x10)
11151 base_flags |= 0x00000010;
11152 base_flags |= (hdr_len & 0x3e0) << 5;
11153 } else if (tg3_flag(tp, HW_TSO_2))
11154 mss |= hdr_len << 9;
11155 else if (tg3_flag(tp, HW_TSO_1) ||
11156 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11157 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11159 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11162 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11165 data_off = ETH_HLEN;
11168 for (i = data_off; i < tx_len; i++)
11169 tx_data[i] = (u8) (i & 0xff);
11171 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11172 if (pci_dma_mapping_error(tp->pdev, map)) {
11173 dev_kfree_skb(skb);
11177 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11182 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11184 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
11185 base_flags, (mss << 1) | 1);
11189 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11190 tr32_mailbox(tnapi->prodmbox);
11194 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11195 for (i = 0; i < 35; i++) {
11196 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11201 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11202 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11203 if ((tx_idx == tnapi->tx_prod) &&
11204 (rx_idx == (rx_start_idx + num_pkts)))
11208 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
11209 dev_kfree_skb(skb);
11211 if (tx_idx != tnapi->tx_prod)
11214 if (rx_idx != rx_start_idx + num_pkts)
11218 while (rx_idx != rx_start_idx) {
11219 desc = &rnapi->rx_rcb[rx_start_idx++];
11220 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11221 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11223 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11224 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11227 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11230 if (loopback_mode != TG3_TSO_LOOPBACK) {
11231 if (rx_len != tx_len)
11234 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11235 if (opaque_key != RXD_OPAQUE_RING_STD)
11238 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11241 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11242 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11243 >> RXD_TCPCSUM_SHIFT != 0xffff) {
11247 if (opaque_key == RXD_OPAQUE_RING_STD) {
11248 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11249 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11251 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11252 rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11253 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11258 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11259 PCI_DMA_FROMDEVICE);
11261 for (i = data_off; i < rx_len; i++, val++) {
11262 if (*(rx_skb->data + i) != (u8) (val & 0xff))
11269 /* tg3_free_rings will unmap and free the rx_skb */
11274 #define TG3_STD_LOOPBACK_FAILED 1
11275 #define TG3_JMB_LOOPBACK_FAILED 2
11276 #define TG3_TSO_LOOPBACK_FAILED 4
11278 #define TG3_MAC_LOOPBACK_SHIFT 0
11279 #define TG3_PHY_LOOPBACK_SHIFT 4
11280 #define TG3_LOOPBACK_FAILED 0x00000077
11282 static int tg3_test_loopback(struct tg3 *tp)
11285 u32 eee_cap, cpmuctrl = 0;
11287 if (!netif_running(tp->dev))
11288 return TG3_LOOPBACK_FAILED;
11290 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11291 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11293 err = tg3_reset_hw(tp, 1);
11295 err = TG3_LOOPBACK_FAILED;
11299 if (tg3_flag(tp, ENABLE_RSS)) {
11302 /* Reroute all rx packets to the 1st queue */
11303 for (i = MAC_RSS_INDIR_TBL_0;
11304 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11308 /* Turn off gphy autopowerdown. */
11309 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11310 tg3_phy_toggle_apd(tp, false);
11312 if (tg3_flag(tp, CPMU_PRESENT)) {
11316 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11318 /* Wait for up to 40 microseconds to acquire lock. */
11319 for (i = 0; i < 4; i++) {
11320 status = tr32(TG3_CPMU_MUTEX_GNT);
11321 if (status == CPMU_MUTEX_GNT_DRIVER)
11326 if (status != CPMU_MUTEX_GNT_DRIVER) {
11327 err = TG3_LOOPBACK_FAILED;
11331 /* Turn off link-based power management. */
11332 cpmuctrl = tr32(TG3_CPMU_CTRL);
11333 tw32(TG3_CPMU_CTRL,
11334 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11335 CPMU_CTRL_LINK_AWARE_MODE));
11338 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
11339 err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11341 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11342 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
11343 err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11345 if (tg3_flag(tp, CPMU_PRESENT)) {
11346 tw32(TG3_CPMU_CTRL, cpmuctrl);
11348 /* Release the mutex */
11349 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
11352 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11353 !tg3_flag(tp, USE_PHYLIB)) {
11354 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
11355 err |= TG3_STD_LOOPBACK_FAILED <<
11356 TG3_PHY_LOOPBACK_SHIFT;
11357 if (tg3_flag(tp, TSO_CAPABLE) &&
11358 tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
11359 err |= TG3_TSO_LOOPBACK_FAILED <<
11360 TG3_PHY_LOOPBACK_SHIFT;
11361 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11362 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
11363 err |= TG3_JMB_LOOPBACK_FAILED <<
11364 TG3_PHY_LOOPBACK_SHIFT;
11367 /* Re-enable gphy autopowerdown. */
11368 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11369 tg3_phy_toggle_apd(tp, true);
11372 tp->phy_flags |= eee_cap;
11377 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11380 struct tg3 *tp = netdev_priv(dev);
11382 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11385 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11387 if (tg3_test_nvram(tp) != 0) {
11388 etest->flags |= ETH_TEST_FL_FAILED;
11391 if (tg3_test_link(tp) != 0) {
11392 etest->flags |= ETH_TEST_FL_FAILED;
11395 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11396 int err, err2 = 0, irq_sync = 0;
11398 if (netif_running(dev)) {
11400 tg3_netif_stop(tp);
11404 tg3_full_lock(tp, irq_sync);
11406 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11407 err = tg3_nvram_lock(tp);
11408 tg3_halt_cpu(tp, RX_CPU_BASE);
11409 if (!tg3_flag(tp, 5705_PLUS))
11410 tg3_halt_cpu(tp, TX_CPU_BASE);
11412 tg3_nvram_unlock(tp);
11414 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11417 if (tg3_test_registers(tp) != 0) {
11418 etest->flags |= ETH_TEST_FL_FAILED;
11421 if (tg3_test_memory(tp) != 0) {
11422 etest->flags |= ETH_TEST_FL_FAILED;
11425 if ((data[4] = tg3_test_loopback(tp)) != 0)
11426 etest->flags |= ETH_TEST_FL_FAILED;
11428 tg3_full_unlock(tp);
11430 if (tg3_test_interrupt(tp) != 0) {
11431 etest->flags |= ETH_TEST_FL_FAILED;
11435 tg3_full_lock(tp, 0);
11437 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11438 if (netif_running(dev)) {
11439 tg3_flag_set(tp, INIT_COMPLETE);
11440 err2 = tg3_restart_hw(tp, 1);
11442 tg3_netif_start(tp);
11445 tg3_full_unlock(tp);
11447 if (irq_sync && !err2)
11450 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11451 tg3_power_down(tp);
11455 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11457 struct mii_ioctl_data *data = if_mii(ifr);
11458 struct tg3 *tp = netdev_priv(dev);
11461 if (tg3_flag(tp, USE_PHYLIB)) {
11462 struct phy_device *phydev;
11463 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11465 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11466 return phy_mii_ioctl(phydev, ifr, cmd);
11471 data->phy_id = tp->phy_addr;
11474 case SIOCGMIIREG: {
11477 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11478 break; /* We have no PHY */
11480 if (!netif_running(dev))
11483 spin_lock_bh(&tp->lock);
11484 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11485 spin_unlock_bh(&tp->lock);
11487 data->val_out = mii_regval;
11493 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11494 break; /* We have no PHY */
11496 if (!netif_running(dev))
11499 spin_lock_bh(&tp->lock);
11500 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11501 spin_unlock_bh(&tp->lock);
11509 return -EOPNOTSUPP;
11512 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11514 struct tg3 *tp = netdev_priv(dev);
11516 memcpy(ec, &tp->coal, sizeof(*ec));
11520 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11522 struct tg3 *tp = netdev_priv(dev);
11523 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11524 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11526 if (!tg3_flag(tp, 5705_PLUS)) {
11527 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11528 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11529 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11530 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11533 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11534 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11535 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11536 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11537 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11538 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11539 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11540 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11541 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11542 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11545 /* No rx interrupts will be generated if both are zero */
11546 if ((ec->rx_coalesce_usecs == 0) &&
11547 (ec->rx_max_coalesced_frames == 0))
11550 /* No tx interrupts will be generated if both are zero */
11551 if ((ec->tx_coalesce_usecs == 0) &&
11552 (ec->tx_max_coalesced_frames == 0))
11555 /* Only copy relevant parameters, ignore all others. */
11556 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11557 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11558 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11559 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11560 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11561 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11562 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11563 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11564 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11566 if (netif_running(dev)) {
11567 tg3_full_lock(tp, 0);
11568 __tg3_set_coalesce(tp, &tp->coal);
11569 tg3_full_unlock(tp);
11574 static const struct ethtool_ops tg3_ethtool_ops = {
11575 .get_settings = tg3_get_settings,
11576 .set_settings = tg3_set_settings,
11577 .get_drvinfo = tg3_get_drvinfo,
11578 .get_regs_len = tg3_get_regs_len,
11579 .get_regs = tg3_get_regs,
11580 .get_wol = tg3_get_wol,
11581 .set_wol = tg3_set_wol,
11582 .get_msglevel = tg3_get_msglevel,
11583 .set_msglevel = tg3_set_msglevel,
11584 .nway_reset = tg3_nway_reset,
11585 .get_link = ethtool_op_get_link,
11586 .get_eeprom_len = tg3_get_eeprom_len,
11587 .get_eeprom = tg3_get_eeprom,
11588 .set_eeprom = tg3_set_eeprom,
11589 .get_ringparam = tg3_get_ringparam,
11590 .set_ringparam = tg3_set_ringparam,
11591 .get_pauseparam = tg3_get_pauseparam,
11592 .set_pauseparam = tg3_set_pauseparam,
11593 .self_test = tg3_self_test,
11594 .get_strings = tg3_get_strings,
11595 .set_phys_id = tg3_set_phys_id,
11596 .get_ethtool_stats = tg3_get_ethtool_stats,
11597 .get_coalesce = tg3_get_coalesce,
11598 .set_coalesce = tg3_set_coalesce,
11599 .get_sset_count = tg3_get_sset_count,
11602 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11604 u32 cursize, val, magic;
11606 tp->nvram_size = EEPROM_CHIP_SIZE;
11608 if (tg3_nvram_read(tp, 0, &magic) != 0)
11611 if ((magic != TG3_EEPROM_MAGIC) &&
11612 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11613 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11617 * Size the chip by reading offsets at increasing powers of two.
11618 * When we encounter our validation signature, we know the addressing
11619 * has wrapped around, and thus have our chip size.
11623 while (cursize < tp->nvram_size) {
11624 if (tg3_nvram_read(tp, cursize, &val) != 0)
11633 tp->nvram_size = cursize;
11636 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11640 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11643 /* Selfboot format */
11644 if (val != TG3_EEPROM_MAGIC) {
11645 tg3_get_eeprom_size(tp);
11649 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11651 /* This is confusing. We want to operate on the
11652 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11653 * call will read from NVRAM and byteswap the data
11654 * according to the byteswapping settings for all
11655 * other register accesses. This ensures the data we
11656 * want will always reside in the lower 16-bits.
11657 * However, the data in NVRAM is in LE format, which
11658 * means the data from the NVRAM read will always be
11659 * opposite the endianness of the CPU. The 16-bit
11660 * byteswap then brings the data to CPU endianness.
11662 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11666 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11669 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11673 nvcfg1 = tr32(NVRAM_CFG1);
11674 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11675 tg3_flag_set(tp, FLASH);
11677 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11678 tw32(NVRAM_CFG1, nvcfg1);
11681 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11682 tg3_flag(tp, 5780_CLASS)) {
11683 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11684 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11685 tp->nvram_jedecnum = JEDEC_ATMEL;
11686 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11687 tg3_flag_set(tp, NVRAM_BUFFERED);
11689 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11690 tp->nvram_jedecnum = JEDEC_ATMEL;
11691 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11693 case FLASH_VENDOR_ATMEL_EEPROM:
11694 tp->nvram_jedecnum = JEDEC_ATMEL;
11695 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11696 tg3_flag_set(tp, NVRAM_BUFFERED);
11698 case FLASH_VENDOR_ST:
11699 tp->nvram_jedecnum = JEDEC_ST;
11700 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11701 tg3_flag_set(tp, NVRAM_BUFFERED);
11703 case FLASH_VENDOR_SAIFUN:
11704 tp->nvram_jedecnum = JEDEC_SAIFUN;
11705 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11707 case FLASH_VENDOR_SST_SMALL:
11708 case FLASH_VENDOR_SST_LARGE:
11709 tp->nvram_jedecnum = JEDEC_SST;
11710 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11714 tp->nvram_jedecnum = JEDEC_ATMEL;
11715 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11716 tg3_flag_set(tp, NVRAM_BUFFERED);
11720 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11722 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11723 case FLASH_5752PAGE_SIZE_256:
11724 tp->nvram_pagesize = 256;
11726 case FLASH_5752PAGE_SIZE_512:
11727 tp->nvram_pagesize = 512;
11729 case FLASH_5752PAGE_SIZE_1K:
11730 tp->nvram_pagesize = 1024;
11732 case FLASH_5752PAGE_SIZE_2K:
11733 tp->nvram_pagesize = 2048;
11735 case FLASH_5752PAGE_SIZE_4K:
11736 tp->nvram_pagesize = 4096;
11738 case FLASH_5752PAGE_SIZE_264:
11739 tp->nvram_pagesize = 264;
11741 case FLASH_5752PAGE_SIZE_528:
11742 tp->nvram_pagesize = 528;
11747 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11751 nvcfg1 = tr32(NVRAM_CFG1);
11753 /* NVRAM protection for TPM */
11754 if (nvcfg1 & (1 << 27))
11755 tg3_flag_set(tp, PROTECTED_NVRAM);
11757 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11758 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11759 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11760 tp->nvram_jedecnum = JEDEC_ATMEL;
11761 tg3_flag_set(tp, NVRAM_BUFFERED);
11763 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11764 tp->nvram_jedecnum = JEDEC_ATMEL;
11765 tg3_flag_set(tp, NVRAM_BUFFERED);
11766 tg3_flag_set(tp, FLASH);
11768 case FLASH_5752VENDOR_ST_M45PE10:
11769 case FLASH_5752VENDOR_ST_M45PE20:
11770 case FLASH_5752VENDOR_ST_M45PE40:
11771 tp->nvram_jedecnum = JEDEC_ST;
11772 tg3_flag_set(tp, NVRAM_BUFFERED);
11773 tg3_flag_set(tp, FLASH);
11777 if (tg3_flag(tp, FLASH)) {
11778 tg3_nvram_get_pagesize(tp, nvcfg1);
11780 /* For eeprom, set pagesize to maximum eeprom size */
11781 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11783 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11784 tw32(NVRAM_CFG1, nvcfg1);
11788 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11790 u32 nvcfg1, protect = 0;
11792 nvcfg1 = tr32(NVRAM_CFG1);
11794 /* NVRAM protection for TPM */
11795 if (nvcfg1 & (1 << 27)) {
11796 tg3_flag_set(tp, PROTECTED_NVRAM);
11800 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11802 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11803 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11804 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11805 case FLASH_5755VENDOR_ATMEL_FLASH_5:
11806 tp->nvram_jedecnum = JEDEC_ATMEL;
11807 tg3_flag_set(tp, NVRAM_BUFFERED);
11808 tg3_flag_set(tp, FLASH);
11809 tp->nvram_pagesize = 264;
11810 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11811 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11812 tp->nvram_size = (protect ? 0x3e200 :
11813 TG3_NVRAM_SIZE_512KB);
11814 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11815 tp->nvram_size = (protect ? 0x1f200 :
11816 TG3_NVRAM_SIZE_256KB);
11818 tp->nvram_size = (protect ? 0x1f200 :
11819 TG3_NVRAM_SIZE_128KB);
11821 case FLASH_5752VENDOR_ST_M45PE10:
11822 case FLASH_5752VENDOR_ST_M45PE20:
11823 case FLASH_5752VENDOR_ST_M45PE40:
11824 tp->nvram_jedecnum = JEDEC_ST;
11825 tg3_flag_set(tp, NVRAM_BUFFERED);
11826 tg3_flag_set(tp, FLASH);
11827 tp->nvram_pagesize = 256;
11828 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11829 tp->nvram_size = (protect ?
11830 TG3_NVRAM_SIZE_64KB :
11831 TG3_NVRAM_SIZE_128KB);
11832 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11833 tp->nvram_size = (protect ?
11834 TG3_NVRAM_SIZE_64KB :
11835 TG3_NVRAM_SIZE_256KB);
11837 tp->nvram_size = (protect ?
11838 TG3_NVRAM_SIZE_128KB :
11839 TG3_NVRAM_SIZE_512KB);
11844 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11848 nvcfg1 = tr32(NVRAM_CFG1);
11850 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11851 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11852 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11853 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11854 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11855 tp->nvram_jedecnum = JEDEC_ATMEL;
11856 tg3_flag_set(tp, NVRAM_BUFFERED);
11857 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11859 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11860 tw32(NVRAM_CFG1, nvcfg1);
11862 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11863 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11864 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11865 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11866 tp->nvram_jedecnum = JEDEC_ATMEL;
11867 tg3_flag_set(tp, NVRAM_BUFFERED);
11868 tg3_flag_set(tp, FLASH);
11869 tp->nvram_pagesize = 264;
11871 case FLASH_5752VENDOR_ST_M45PE10:
11872 case FLASH_5752VENDOR_ST_M45PE20:
11873 case FLASH_5752VENDOR_ST_M45PE40:
11874 tp->nvram_jedecnum = JEDEC_ST;
11875 tg3_flag_set(tp, NVRAM_BUFFERED);
11876 tg3_flag_set(tp, FLASH);
11877 tp->nvram_pagesize = 256;
11882 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11884 u32 nvcfg1, protect = 0;
11886 nvcfg1 = tr32(NVRAM_CFG1);
11888 /* NVRAM protection for TPM */
11889 if (nvcfg1 & (1 << 27)) {
11890 tg3_flag_set(tp, PROTECTED_NVRAM);
11894 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11896 case FLASH_5761VENDOR_ATMEL_ADB021D:
11897 case FLASH_5761VENDOR_ATMEL_ADB041D:
11898 case FLASH_5761VENDOR_ATMEL_ADB081D:
11899 case FLASH_5761VENDOR_ATMEL_ADB161D:
11900 case FLASH_5761VENDOR_ATMEL_MDB021D:
11901 case FLASH_5761VENDOR_ATMEL_MDB041D:
11902 case FLASH_5761VENDOR_ATMEL_MDB081D:
11903 case FLASH_5761VENDOR_ATMEL_MDB161D:
11904 tp->nvram_jedecnum = JEDEC_ATMEL;
11905 tg3_flag_set(tp, NVRAM_BUFFERED);
11906 tg3_flag_set(tp, FLASH);
11907 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
11908 tp->nvram_pagesize = 256;
11910 case FLASH_5761VENDOR_ST_A_M45PE20:
11911 case FLASH_5761VENDOR_ST_A_M45PE40:
11912 case FLASH_5761VENDOR_ST_A_M45PE80:
11913 case FLASH_5761VENDOR_ST_A_M45PE16:
11914 case FLASH_5761VENDOR_ST_M_M45PE20:
11915 case FLASH_5761VENDOR_ST_M_M45PE40:
11916 case FLASH_5761VENDOR_ST_M_M45PE80:
11917 case FLASH_5761VENDOR_ST_M_M45PE16:
11918 tp->nvram_jedecnum = JEDEC_ST;
11919 tg3_flag_set(tp, NVRAM_BUFFERED);
11920 tg3_flag_set(tp, FLASH);
11921 tp->nvram_pagesize = 256;
11926 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
11929 case FLASH_5761VENDOR_ATMEL_ADB161D:
11930 case FLASH_5761VENDOR_ATMEL_MDB161D:
11931 case FLASH_5761VENDOR_ST_A_M45PE16:
11932 case FLASH_5761VENDOR_ST_M_M45PE16:
11933 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
11935 case FLASH_5761VENDOR_ATMEL_ADB081D:
11936 case FLASH_5761VENDOR_ATMEL_MDB081D:
11937 case FLASH_5761VENDOR_ST_A_M45PE80:
11938 case FLASH_5761VENDOR_ST_M_M45PE80:
11939 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
11941 case FLASH_5761VENDOR_ATMEL_ADB041D:
11942 case FLASH_5761VENDOR_ATMEL_MDB041D:
11943 case FLASH_5761VENDOR_ST_A_M45PE40:
11944 case FLASH_5761VENDOR_ST_M_M45PE40:
11945 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11947 case FLASH_5761VENDOR_ATMEL_ADB021D:
11948 case FLASH_5761VENDOR_ATMEL_MDB021D:
11949 case FLASH_5761VENDOR_ST_A_M45PE20:
11950 case FLASH_5761VENDOR_ST_M_M45PE20:
11951 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11957 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
11959 tp->nvram_jedecnum = JEDEC_ATMEL;
11960 tg3_flag_set(tp, NVRAM_BUFFERED);
11961 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11964 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
11968 nvcfg1 = tr32(NVRAM_CFG1);
11970 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11971 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11972 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11973 tp->nvram_jedecnum = JEDEC_ATMEL;
11974 tg3_flag_set(tp, NVRAM_BUFFERED);
11975 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11977 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11978 tw32(NVRAM_CFG1, nvcfg1);
11980 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11981 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11982 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11983 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11984 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11985 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11986 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11987 tp->nvram_jedecnum = JEDEC_ATMEL;
11988 tg3_flag_set(tp, NVRAM_BUFFERED);
11989 tg3_flag_set(tp, FLASH);
11991 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11992 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11993 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11994 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11995 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11997 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11998 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11999 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12001 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12002 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12003 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12007 case FLASH_5752VENDOR_ST_M45PE10:
12008 case FLASH_5752VENDOR_ST_M45PE20:
12009 case FLASH_5752VENDOR_ST_M45PE40:
12010 tp->nvram_jedecnum = JEDEC_ST;
12011 tg3_flag_set(tp, NVRAM_BUFFERED);
12012 tg3_flag_set(tp, FLASH);
12014 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12015 case FLASH_5752VENDOR_ST_M45PE10:
12016 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12018 case FLASH_5752VENDOR_ST_M45PE20:
12019 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12021 case FLASH_5752VENDOR_ST_M45PE40:
12022 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12027 tg3_flag_set(tp, NO_NVRAM);
12031 tg3_nvram_get_pagesize(tp, nvcfg1);
12032 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12033 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12037 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12041 nvcfg1 = tr32(NVRAM_CFG1);
12043 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12044 case FLASH_5717VENDOR_ATMEL_EEPROM:
12045 case FLASH_5717VENDOR_MICRO_EEPROM:
12046 tp->nvram_jedecnum = JEDEC_ATMEL;
12047 tg3_flag_set(tp, NVRAM_BUFFERED);
12048 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12050 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12051 tw32(NVRAM_CFG1, nvcfg1);
12053 case FLASH_5717VENDOR_ATMEL_MDB011D:
12054 case FLASH_5717VENDOR_ATMEL_ADB011B:
12055 case FLASH_5717VENDOR_ATMEL_ADB011D:
12056 case FLASH_5717VENDOR_ATMEL_MDB021D:
12057 case FLASH_5717VENDOR_ATMEL_ADB021B:
12058 case FLASH_5717VENDOR_ATMEL_ADB021D:
12059 case FLASH_5717VENDOR_ATMEL_45USPT:
12060 tp->nvram_jedecnum = JEDEC_ATMEL;
12061 tg3_flag_set(tp, NVRAM_BUFFERED);
12062 tg3_flag_set(tp, FLASH);
12064 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12065 case FLASH_5717VENDOR_ATMEL_MDB021D:
12066 /* Detect size with tg3_nvram_get_size() */
12068 case FLASH_5717VENDOR_ATMEL_ADB021B:
12069 case FLASH_5717VENDOR_ATMEL_ADB021D:
12070 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12073 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12077 case FLASH_5717VENDOR_ST_M_M25PE10:
12078 case FLASH_5717VENDOR_ST_A_M25PE10:
12079 case FLASH_5717VENDOR_ST_M_M45PE10:
12080 case FLASH_5717VENDOR_ST_A_M45PE10:
12081 case FLASH_5717VENDOR_ST_M_M25PE20:
12082 case FLASH_5717VENDOR_ST_A_M25PE20:
12083 case FLASH_5717VENDOR_ST_M_M45PE20:
12084 case FLASH_5717VENDOR_ST_A_M45PE20:
12085 case FLASH_5717VENDOR_ST_25USPT:
12086 case FLASH_5717VENDOR_ST_45USPT:
12087 tp->nvram_jedecnum = JEDEC_ST;
12088 tg3_flag_set(tp, NVRAM_BUFFERED);
12089 tg3_flag_set(tp, FLASH);
12091 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12092 case FLASH_5717VENDOR_ST_M_M25PE20:
12093 case FLASH_5717VENDOR_ST_M_M45PE20:
12094 /* Detect size with tg3_nvram_get_size() */
12096 case FLASH_5717VENDOR_ST_A_M25PE20:
12097 case FLASH_5717VENDOR_ST_A_M45PE20:
12098 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12101 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12106 tg3_flag_set(tp, NO_NVRAM);
12110 tg3_nvram_get_pagesize(tp, nvcfg1);
12111 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12112 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12115 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12117 u32 nvcfg1, nvmpinstrp;
12119 nvcfg1 = tr32(NVRAM_CFG1);
12120 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12122 switch (nvmpinstrp) {
12123 case FLASH_5720_EEPROM_HD:
12124 case FLASH_5720_EEPROM_LD:
12125 tp->nvram_jedecnum = JEDEC_ATMEL;
12126 tg3_flag_set(tp, NVRAM_BUFFERED);
12128 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12129 tw32(NVRAM_CFG1, nvcfg1);
12130 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12131 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12133 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12135 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12136 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12137 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12138 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12139 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12140 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12141 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12142 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12143 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12144 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12145 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12146 case FLASH_5720VENDOR_ATMEL_45USPT:
12147 tp->nvram_jedecnum = JEDEC_ATMEL;
12148 tg3_flag_set(tp, NVRAM_BUFFERED);
12149 tg3_flag_set(tp, FLASH);
12151 switch (nvmpinstrp) {
12152 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12153 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12154 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12155 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12157 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12158 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12159 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12160 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12162 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12163 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12164 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12167 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12171 case FLASH_5720VENDOR_M_ST_M25PE10:
12172 case FLASH_5720VENDOR_M_ST_M45PE10:
12173 case FLASH_5720VENDOR_A_ST_M25PE10:
12174 case FLASH_5720VENDOR_A_ST_M45PE10:
12175 case FLASH_5720VENDOR_M_ST_M25PE20:
12176 case FLASH_5720VENDOR_M_ST_M45PE20:
12177 case FLASH_5720VENDOR_A_ST_M25PE20:
12178 case FLASH_5720VENDOR_A_ST_M45PE20:
12179 case FLASH_5720VENDOR_M_ST_M25PE40:
12180 case FLASH_5720VENDOR_M_ST_M45PE40:
12181 case FLASH_5720VENDOR_A_ST_M25PE40:
12182 case FLASH_5720VENDOR_A_ST_M45PE40:
12183 case FLASH_5720VENDOR_M_ST_M25PE80:
12184 case FLASH_5720VENDOR_M_ST_M45PE80:
12185 case FLASH_5720VENDOR_A_ST_M25PE80:
12186 case FLASH_5720VENDOR_A_ST_M45PE80:
12187 case FLASH_5720VENDOR_ST_25USPT:
12188 case FLASH_5720VENDOR_ST_45USPT:
12189 tp->nvram_jedecnum = JEDEC_ST;
12190 tg3_flag_set(tp, NVRAM_BUFFERED);
12191 tg3_flag_set(tp, FLASH);
12193 switch (nvmpinstrp) {
12194 case FLASH_5720VENDOR_M_ST_M25PE20:
12195 case FLASH_5720VENDOR_M_ST_M45PE20:
12196 case FLASH_5720VENDOR_A_ST_M25PE20:
12197 case FLASH_5720VENDOR_A_ST_M45PE20:
12198 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12200 case FLASH_5720VENDOR_M_ST_M25PE40:
12201 case FLASH_5720VENDOR_M_ST_M45PE40:
12202 case FLASH_5720VENDOR_A_ST_M25PE40:
12203 case FLASH_5720VENDOR_A_ST_M45PE40:
12204 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12206 case FLASH_5720VENDOR_M_ST_M25PE80:
12207 case FLASH_5720VENDOR_M_ST_M45PE80:
12208 case FLASH_5720VENDOR_A_ST_M25PE80:
12209 case FLASH_5720VENDOR_A_ST_M45PE80:
12210 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12213 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12218 tg3_flag_set(tp, NO_NVRAM);
12222 tg3_nvram_get_pagesize(tp, nvcfg1);
12223 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12224 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12227 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12228 static void __devinit tg3_nvram_init(struct tg3 *tp)
12230 tw32_f(GRC_EEPROM_ADDR,
12231 (EEPROM_ADDR_FSM_RESET |
12232 (EEPROM_DEFAULT_CLOCK_PERIOD <<
12233 EEPROM_ADDR_CLKPERD_SHIFT)));
12237 /* Enable seeprom accesses. */
12238 tw32_f(GRC_LOCAL_CTRL,
12239 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12242 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12243 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12244 tg3_flag_set(tp, NVRAM);
12246 if (tg3_nvram_lock(tp)) {
12247 netdev_warn(tp->dev,
12248 "Cannot get nvram lock, %s failed\n",
12252 tg3_enable_nvram_access(tp);
12254 tp->nvram_size = 0;
12256 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12257 tg3_get_5752_nvram_info(tp);
12258 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12259 tg3_get_5755_nvram_info(tp);
12260 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12261 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12262 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12263 tg3_get_5787_nvram_info(tp);
12264 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12265 tg3_get_5761_nvram_info(tp);
12266 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12267 tg3_get_5906_nvram_info(tp);
12268 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12269 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12270 tg3_get_57780_nvram_info(tp);
12271 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12272 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12273 tg3_get_5717_nvram_info(tp);
12274 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12275 tg3_get_5720_nvram_info(tp);
12277 tg3_get_nvram_info(tp);
12279 if (tp->nvram_size == 0)
12280 tg3_get_nvram_size(tp);
12282 tg3_disable_nvram_access(tp);
12283 tg3_nvram_unlock(tp);
12286 tg3_flag_clear(tp, NVRAM);
12287 tg3_flag_clear(tp, NVRAM_BUFFERED);
12289 tg3_get_eeprom_size(tp);
12293 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12294 u32 offset, u32 len, u8 *buf)
12299 for (i = 0; i < len; i += 4) {
12305 memcpy(&data, buf + i, 4);
12308 * The SEEPROM interface expects the data to always be opposite
12309 * the native endian format. We accomplish this by reversing
12310 * all the operations that would have been performed on the
12311 * data from a call to tg3_nvram_read_be32().
12313 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12315 val = tr32(GRC_EEPROM_ADDR);
12316 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12318 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12320 tw32(GRC_EEPROM_ADDR, val |
12321 (0 << EEPROM_ADDR_DEVID_SHIFT) |
12322 (addr & EEPROM_ADDR_ADDR_MASK) |
12323 EEPROM_ADDR_START |
12324 EEPROM_ADDR_WRITE);
12326 for (j = 0; j < 1000; j++) {
12327 val = tr32(GRC_EEPROM_ADDR);
12329 if (val & EEPROM_ADDR_COMPLETE)
12333 if (!(val & EEPROM_ADDR_COMPLETE)) {
12342 /* offset and length are dword aligned */
12343 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12347 u32 pagesize = tp->nvram_pagesize;
12348 u32 pagemask = pagesize - 1;
12352 tmp = kmalloc(pagesize, GFP_KERNEL);
12358 u32 phy_addr, page_off, size;
12360 phy_addr = offset & ~pagemask;
12362 for (j = 0; j < pagesize; j += 4) {
12363 ret = tg3_nvram_read_be32(tp, phy_addr + j,
12364 (__be32 *) (tmp + j));
12371 page_off = offset & pagemask;
12378 memcpy(tmp + page_off, buf, size);
12380 offset = offset + (pagesize - page_off);
12382 tg3_enable_nvram_access(tp);
12385 * Before we can erase the flash page, we need
12386 * to issue a special "write enable" command.
12388 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12390 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12393 /* Erase the target page */
12394 tw32(NVRAM_ADDR, phy_addr);
12396 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12397 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12399 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12402 /* Issue another write enable to start the write. */
12403 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12405 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12408 for (j = 0; j < pagesize; j += 4) {
12411 data = *((__be32 *) (tmp + j));
12413 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12415 tw32(NVRAM_ADDR, phy_addr + j);
12417 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12421 nvram_cmd |= NVRAM_CMD_FIRST;
12422 else if (j == (pagesize - 4))
12423 nvram_cmd |= NVRAM_CMD_LAST;
12425 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12432 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12433 tg3_nvram_exec_cmd(tp, nvram_cmd);
12440 /* offset and length are dword aligned */
12441 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12446 for (i = 0; i < len; i += 4, offset += 4) {
12447 u32 page_off, phy_addr, nvram_cmd;
12450 memcpy(&data, buf + i, 4);
12451 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12453 page_off = offset % tp->nvram_pagesize;
12455 phy_addr = tg3_nvram_phys_addr(tp, offset);
12457 tw32(NVRAM_ADDR, phy_addr);
12459 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12461 if (page_off == 0 || i == 0)
12462 nvram_cmd |= NVRAM_CMD_FIRST;
12463 if (page_off == (tp->nvram_pagesize - 4))
12464 nvram_cmd |= NVRAM_CMD_LAST;
12466 if (i == (len - 4))
12467 nvram_cmd |= NVRAM_CMD_LAST;
12469 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12470 !tg3_flag(tp, 5755_PLUS) &&
12471 (tp->nvram_jedecnum == JEDEC_ST) &&
12472 (nvram_cmd & NVRAM_CMD_FIRST)) {
12474 if ((ret = tg3_nvram_exec_cmd(tp,
12475 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12480 if (!tg3_flag(tp, FLASH)) {
12481 /* We always do complete word writes to eeprom. */
12482 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12485 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12491 /* offset and length are dword aligned */
12492 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12496 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12497 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12498 ~GRC_LCLCTRL_GPIO_OUTPUT1);
12502 if (!tg3_flag(tp, NVRAM)) {
12503 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12507 ret = tg3_nvram_lock(tp);
12511 tg3_enable_nvram_access(tp);
12512 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12513 tw32(NVRAM_WRITE1, 0x406);
12515 grc_mode = tr32(GRC_MODE);
12516 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12518 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12519 ret = tg3_nvram_write_block_buffered(tp, offset, len,
12522 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12526 grc_mode = tr32(GRC_MODE);
12527 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12529 tg3_disable_nvram_access(tp);
12530 tg3_nvram_unlock(tp);
12533 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12534 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12541 struct subsys_tbl_ent {
12542 u16 subsys_vendor, subsys_devid;
12546 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12547 /* Broadcom boards. */
12548 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12549 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12550 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12551 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12552 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12553 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12554 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12555 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12556 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12557 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12558 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12559 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12560 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12561 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12562 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12563 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12564 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12565 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12566 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12567 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12568 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12569 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12572 { TG3PCI_SUBVENDOR_ID_3COM,
12573 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12574 { TG3PCI_SUBVENDOR_ID_3COM,
12575 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12576 { TG3PCI_SUBVENDOR_ID_3COM,
12577 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12578 { TG3PCI_SUBVENDOR_ID_3COM,
12579 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12580 { TG3PCI_SUBVENDOR_ID_3COM,
12581 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12584 { TG3PCI_SUBVENDOR_ID_DELL,
12585 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12586 { TG3PCI_SUBVENDOR_ID_DELL,
12587 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12588 { TG3PCI_SUBVENDOR_ID_DELL,
12589 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12590 { TG3PCI_SUBVENDOR_ID_DELL,
12591 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12593 /* Compaq boards. */
12594 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12595 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12596 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12597 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12598 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12599 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12600 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12601 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12602 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12603 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12606 { TG3PCI_SUBVENDOR_ID_IBM,
12607 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12610 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12614 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12615 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12616 tp->pdev->subsystem_vendor) &&
12617 (subsys_id_to_phy_id[i].subsys_devid ==
12618 tp->pdev->subsystem_device))
12619 return &subsys_id_to_phy_id[i];
12624 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12629 /* On some early chips the SRAM cannot be accessed in D3hot state,
12630 * so need make sure we're in D0.
12632 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
12633 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
12634 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
12637 /* Make sure register accesses (indirect or otherwise)
12638 * will function correctly.
12640 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12641 tp->misc_host_ctrl);
12643 /* The memory arbiter has to be enabled in order for SRAM accesses
12644 * to succeed. Normally on powerup the tg3 chip firmware will make
12645 * sure it is enabled, but other entities such as system netboot
12646 * code might disable it.
12648 val = tr32(MEMARB_MODE);
12649 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
12651 tp->phy_id = TG3_PHY_ID_INVALID;
12652 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12654 /* Assume an onboard device and WOL capable by default. */
12655 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12656 tg3_flag_set(tp, WOL_CAP);
12658 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12659 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12660 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12661 tg3_flag_set(tp, IS_NIC);
12663 val = tr32(VCPU_CFGSHDW);
12664 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12665 tg3_flag_set(tp, ASPM_WORKAROUND);
12666 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12667 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12668 tg3_flag_set(tp, WOL_ENABLE);
12669 device_set_wakeup_enable(&tp->pdev->dev, true);
12674 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12675 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12676 u32 nic_cfg, led_cfg;
12677 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12678 int eeprom_phy_serdes = 0;
12680 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12681 tp->nic_sram_data_cfg = nic_cfg;
12683 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12684 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12685 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12686 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12687 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
12688 (ver > 0) && (ver < 0x100))
12689 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12691 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12692 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12694 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12695 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12696 eeprom_phy_serdes = 1;
12698 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12699 if (nic_phy_id != 0) {
12700 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12701 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12703 eeprom_phy_id = (id1 >> 16) << 10;
12704 eeprom_phy_id |= (id2 & 0xfc00) << 16;
12705 eeprom_phy_id |= (id2 & 0x03ff) << 0;
12709 tp->phy_id = eeprom_phy_id;
12710 if (eeprom_phy_serdes) {
12711 if (!tg3_flag(tp, 5705_PLUS))
12712 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12714 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12717 if (tg3_flag(tp, 5750_PLUS))
12718 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12719 SHASTA_EXT_LED_MODE_MASK);
12721 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12725 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12726 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12729 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12730 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12733 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12734 tp->led_ctrl = LED_CTRL_MODE_MAC;
12736 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12737 * read on some older 5700/5701 bootcode.
12739 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12741 GET_ASIC_REV(tp->pci_chip_rev_id) ==
12743 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12747 case SHASTA_EXT_LED_SHARED:
12748 tp->led_ctrl = LED_CTRL_MODE_SHARED;
12749 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12750 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12751 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12752 LED_CTRL_MODE_PHY_2);
12755 case SHASTA_EXT_LED_MAC:
12756 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12759 case SHASTA_EXT_LED_COMBO:
12760 tp->led_ctrl = LED_CTRL_MODE_COMBO;
12761 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12762 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12763 LED_CTRL_MODE_PHY_2);
12768 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12769 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12770 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12771 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12773 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12774 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12776 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12777 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12778 if ((tp->pdev->subsystem_vendor ==
12779 PCI_VENDOR_ID_ARIMA) &&
12780 (tp->pdev->subsystem_device == 0x205a ||
12781 tp->pdev->subsystem_device == 0x2063))
12782 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12784 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12785 tg3_flag_set(tp, IS_NIC);
12788 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12789 tg3_flag_set(tp, ENABLE_ASF);
12790 if (tg3_flag(tp, 5750_PLUS))
12791 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
12794 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12795 tg3_flag(tp, 5750_PLUS))
12796 tg3_flag_set(tp, ENABLE_APE);
12798 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12799 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12800 tg3_flag_clear(tp, WOL_CAP);
12802 if (tg3_flag(tp, WOL_CAP) &&
12803 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
12804 tg3_flag_set(tp, WOL_ENABLE);
12805 device_set_wakeup_enable(&tp->pdev->dev, true);
12808 if (cfg2 & (1 << 17))
12809 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
12811 /* serdes signal pre-emphasis in register 0x590 set by */
12812 /* bootcode if bit 18 is set */
12813 if (cfg2 & (1 << 18))
12814 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12816 if ((tg3_flag(tp, 57765_PLUS) ||
12817 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12818 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12819 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12820 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12822 if (tg3_flag(tp, PCI_EXPRESS) &&
12823 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12824 !tg3_flag(tp, 57765_PLUS)) {
12827 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12828 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12829 tg3_flag_set(tp, ASPM_WORKAROUND);
12832 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12833 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
12834 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12835 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
12836 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12837 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
12840 if (tg3_flag(tp, WOL_CAP))
12841 device_set_wakeup_enable(&tp->pdev->dev,
12842 tg3_flag(tp, WOL_ENABLE));
12844 device_set_wakeup_capable(&tp->pdev->dev, false);
12847 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12852 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12853 tw32(OTP_CTRL, cmd);
12855 /* Wait for up to 1 ms for command to execute. */
12856 for (i = 0; i < 100; i++) {
12857 val = tr32(OTP_STATUS);
12858 if (val & OTP_STATUS_CMD_DONE)
12863 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
12866 /* Read the gphy configuration from the OTP region of the chip. The gphy
12867 * configuration is a 32-bit value that straddles the alignment boundary.
12868 * We do two 32-bit reads and then shift and merge the results.
12870 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12872 u32 bhalf_otp, thalf_otp;
12874 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
12876 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
12879 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
12881 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12884 thalf_otp = tr32(OTP_READ_DATA);
12886 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
12888 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12891 bhalf_otp = tr32(OTP_READ_DATA);
12893 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
12896 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
12898 u32 adv = ADVERTISED_Autoneg |
12901 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12902 adv |= ADVERTISED_1000baseT_Half |
12903 ADVERTISED_1000baseT_Full;
12905 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12906 adv |= ADVERTISED_100baseT_Half |
12907 ADVERTISED_100baseT_Full |
12908 ADVERTISED_10baseT_Half |
12909 ADVERTISED_10baseT_Full |
12912 adv |= ADVERTISED_FIBRE;
12914 tp->link_config.advertising = adv;
12915 tp->link_config.speed = SPEED_INVALID;
12916 tp->link_config.duplex = DUPLEX_INVALID;
12917 tp->link_config.autoneg = AUTONEG_ENABLE;
12918 tp->link_config.active_speed = SPEED_INVALID;
12919 tp->link_config.active_duplex = DUPLEX_INVALID;
12920 tp->link_config.orig_speed = SPEED_INVALID;
12921 tp->link_config.orig_duplex = DUPLEX_INVALID;
12922 tp->link_config.orig_autoneg = AUTONEG_INVALID;
12925 static int __devinit tg3_phy_probe(struct tg3 *tp)
12927 u32 hw_phy_id_1, hw_phy_id_2;
12928 u32 hw_phy_id, hw_phy_id_masked;
12931 /* flow control autonegotiation is default behavior */
12932 tg3_flag_set(tp, PAUSE_AUTONEG);
12933 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
12935 if (tg3_flag(tp, USE_PHYLIB))
12936 return tg3_phy_init(tp);
12938 /* Reading the PHY ID register can conflict with ASF
12939 * firmware access to the PHY hardware.
12942 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
12943 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
12945 /* Now read the physical PHY_ID from the chip and verify
12946 * that it is sane. If it doesn't look good, we fall back
12947 * to either the hard-coded table based PHY_ID and failing
12948 * that the value found in the eeprom area.
12950 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
12951 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
12953 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
12954 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
12955 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
12957 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
12960 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
12961 tp->phy_id = hw_phy_id;
12962 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
12963 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12965 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
12967 if (tp->phy_id != TG3_PHY_ID_INVALID) {
12968 /* Do nothing, phy ID already set up in
12969 * tg3_get_eeprom_hw_cfg().
12972 struct subsys_tbl_ent *p;
12974 /* No eeprom signature? Try the hardcoded
12975 * subsys device table.
12977 p = tg3_lookup_by_subsys(tp);
12981 tp->phy_id = p->phy_id;
12983 tp->phy_id == TG3_PHY_ID_BCM8002)
12984 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12988 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12989 ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
12990 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
12991 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12992 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
12993 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
12995 tg3_phy_init_link_config(tp);
12997 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12998 !tg3_flag(tp, ENABLE_APE) &&
12999 !tg3_flag(tp, ENABLE_ASF)) {
13002 tg3_readphy(tp, MII_BMSR, &bmsr);
13003 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13004 (bmsr & BMSR_LSTATUS))
13005 goto skip_phy_reset;
13007 err = tg3_phy_reset(tp);
13011 tg3_phy_set_wirespeed(tp);
13013 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13014 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13015 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
13016 if (!tg3_copper_is_advertising_all(tp, mask)) {
13017 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13018 tp->link_config.flowctrl);
13020 tg3_writephy(tp, MII_BMCR,
13021 BMCR_ANENABLE | BMCR_ANRESTART);
13026 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13027 err = tg3_init_5401phy_dsp(tp);
13031 err = tg3_init_5401phy_dsp(tp);
13037 static void __devinit tg3_read_vpd(struct tg3 *tp)
13040 unsigned int block_end, rosize, len;
13043 vpd_data = (u8 *)tg3_vpd_readblock(tp);
13047 i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
13048 PCI_VPD_LRDT_RO_DATA);
13050 goto out_not_found;
13052 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13053 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13054 i += PCI_VPD_LRDT_TAG_SIZE;
13056 if (block_end > TG3_NVM_VPD_LEN)
13057 goto out_not_found;
13059 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13060 PCI_VPD_RO_KEYWORD_MFR_ID);
13062 len = pci_vpd_info_field_size(&vpd_data[j]);
13064 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13065 if (j + len > block_end || len != 4 ||
13066 memcmp(&vpd_data[j], "1028", 4))
13069 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13070 PCI_VPD_RO_KEYWORD_VENDOR0);
13074 len = pci_vpd_info_field_size(&vpd_data[j]);
13076 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13077 if (j + len > block_end)
13080 memcpy(tp->fw_ver, &vpd_data[j], len);
13081 strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
13085 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13086 PCI_VPD_RO_KEYWORD_PARTNO);
13088 goto out_not_found;
13090 len = pci_vpd_info_field_size(&vpd_data[i]);
13092 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13093 if (len > TG3_BPN_SIZE ||
13094 (len + i) > TG3_NVM_VPD_LEN)
13095 goto out_not_found;
13097 memcpy(tp->board_part_number, &vpd_data[i], len);
13101 if (tp->board_part_number[0])
13105 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13106 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13107 strcpy(tp->board_part_number, "BCM5717");
13108 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13109 strcpy(tp->board_part_number, "BCM5718");
13112 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13113 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13114 strcpy(tp->board_part_number, "BCM57780");
13115 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13116 strcpy(tp->board_part_number, "BCM57760");
13117 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13118 strcpy(tp->board_part_number, "BCM57790");
13119 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13120 strcpy(tp->board_part_number, "BCM57788");
13123 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13124 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13125 strcpy(tp->board_part_number, "BCM57761");
13126 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13127 strcpy(tp->board_part_number, "BCM57765");
13128 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13129 strcpy(tp->board_part_number, "BCM57781");
13130 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13131 strcpy(tp->board_part_number, "BCM57785");
13132 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13133 strcpy(tp->board_part_number, "BCM57791");
13134 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13135 strcpy(tp->board_part_number, "BCM57795");
13138 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13139 strcpy(tp->board_part_number, "BCM95906");
13142 strcpy(tp->board_part_number, "none");
13146 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13150 if (tg3_nvram_read(tp, offset, &val) ||
13151 (val & 0xfc000000) != 0x0c000000 ||
13152 tg3_nvram_read(tp, offset + 4, &val) ||
13159 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13161 u32 val, offset, start, ver_offset;
13163 bool newver = false;
13165 if (tg3_nvram_read(tp, 0xc, &offset) ||
13166 tg3_nvram_read(tp, 0x4, &start))
13169 offset = tg3_nvram_logical_addr(tp, offset);
13171 if (tg3_nvram_read(tp, offset, &val))
13174 if ((val & 0xfc000000) == 0x0c000000) {
13175 if (tg3_nvram_read(tp, offset + 4, &val))
13182 dst_off = strlen(tp->fw_ver);
13185 if (TG3_VER_SIZE - dst_off < 16 ||
13186 tg3_nvram_read(tp, offset + 8, &ver_offset))
13189 offset = offset + ver_offset - start;
13190 for (i = 0; i < 16; i += 4) {
13192 if (tg3_nvram_read_be32(tp, offset + i, &v))
13195 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13200 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13203 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13204 TG3_NVM_BCVER_MAJSFT;
13205 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13206 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13207 "v%d.%02d", major, minor);
13211 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13213 u32 val, major, minor;
13215 /* Use native endian representation */
13216 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13219 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13220 TG3_NVM_HWSB_CFG1_MAJSFT;
13221 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13222 TG3_NVM_HWSB_CFG1_MINSFT;
13224 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13227 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13229 u32 offset, major, minor, build;
13231 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13233 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13236 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13237 case TG3_EEPROM_SB_REVISION_0:
13238 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13240 case TG3_EEPROM_SB_REVISION_2:
13241 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13243 case TG3_EEPROM_SB_REVISION_3:
13244 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13246 case TG3_EEPROM_SB_REVISION_4:
13247 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13249 case TG3_EEPROM_SB_REVISION_5:
13250 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13252 case TG3_EEPROM_SB_REVISION_6:
13253 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13259 if (tg3_nvram_read(tp, offset, &val))
13262 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13263 TG3_EEPROM_SB_EDH_BLD_SHFT;
13264 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13265 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13266 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
13268 if (minor > 99 || build > 26)
13271 offset = strlen(tp->fw_ver);
13272 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13273 " v%d.%02d", major, minor);
13276 offset = strlen(tp->fw_ver);
13277 if (offset < TG3_VER_SIZE - 1)
13278 tp->fw_ver[offset] = 'a' + build - 1;
13282 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13284 u32 val, offset, start;
13287 for (offset = TG3_NVM_DIR_START;
13288 offset < TG3_NVM_DIR_END;
13289 offset += TG3_NVM_DIRENT_SIZE) {
13290 if (tg3_nvram_read(tp, offset, &val))
13293 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13297 if (offset == TG3_NVM_DIR_END)
13300 if (!tg3_flag(tp, 5705_PLUS))
13301 start = 0x08000000;
13302 else if (tg3_nvram_read(tp, offset - 4, &start))
13305 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13306 !tg3_fw_img_is_valid(tp, offset) ||
13307 tg3_nvram_read(tp, offset + 8, &val))
13310 offset += val - start;
13312 vlen = strlen(tp->fw_ver);
13314 tp->fw_ver[vlen++] = ',';
13315 tp->fw_ver[vlen++] = ' ';
13317 for (i = 0; i < 4; i++) {
13319 if (tg3_nvram_read_be32(tp, offset, &v))
13322 offset += sizeof(v);
13324 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13325 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13329 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13334 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13340 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13343 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13344 if (apedata != APE_SEG_SIG_MAGIC)
13347 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13348 if (!(apedata & APE_FW_STATUS_READY))
13351 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13353 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13354 tg3_flag_set(tp, APE_HAS_NCSI);
13360 vlen = strlen(tp->fw_ver);
13362 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13364 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13365 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13366 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13367 (apedata & APE_FW_VERSION_BLDMSK));
13370 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13373 bool vpd_vers = false;
13375 if (tp->fw_ver[0] != 0)
13378 if (tg3_flag(tp, NO_NVRAM)) {
13379 strcat(tp->fw_ver, "sb");
13383 if (tg3_nvram_read(tp, 0, &val))
13386 if (val == TG3_EEPROM_MAGIC)
13387 tg3_read_bc_ver(tp);
13388 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13389 tg3_read_sb_ver(tp, val);
13390 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13391 tg3_read_hwsb_ver(tp);
13395 if (!tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || vpd_vers)
13398 tg3_read_mgmtfw_ver(tp);
13401 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13404 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13406 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13408 if (tg3_flag(tp, LRG_PROD_RING_CAP))
13409 return TG3_RX_RET_MAX_SIZE_5717;
13410 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13411 return TG3_RX_RET_MAX_SIZE_5700;
13413 return TG3_RX_RET_MAX_SIZE_5705;
13416 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13417 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13418 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13419 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13423 static int __devinit tg3_get_invariants(struct tg3 *tp)
13426 u32 pci_state_reg, grc_misc_cfg;
13431 /* Force memory write invalidate off. If we leave it on,
13432 * then on 5700_BX chips we have to enable a workaround.
13433 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13434 * to match the cacheline size. The Broadcom driver have this
13435 * workaround but turns MWI off all the times so never uses
13436 * it. This seems to suggest that the workaround is insufficient.
13438 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13439 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13440 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13442 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
13443 * has the register indirect write enable bit set before
13444 * we try to access any of the MMIO registers. It is also
13445 * critical that the PCI-X hw workaround situation is decided
13446 * before that as well.
13448 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13451 tp->pci_chip_rev_id = (misc_ctrl_reg >>
13452 MISC_HOST_CTRL_CHIPREV_SHIFT);
13453 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13454 u32 prod_id_asic_rev;
13456 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13457 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13458 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13459 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13460 pci_read_config_dword(tp->pdev,
13461 TG3PCI_GEN2_PRODID_ASICREV,
13462 &prod_id_asic_rev);
13463 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13464 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13465 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13466 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13467 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13468 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13469 pci_read_config_dword(tp->pdev,
13470 TG3PCI_GEN15_PRODID_ASICREV,
13471 &prod_id_asic_rev);
13473 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13474 &prod_id_asic_rev);
13476 tp->pci_chip_rev_id = prod_id_asic_rev;
13479 /* Wrong chip ID in 5752 A0. This code can be removed later
13480 * as A0 is not in production.
13482 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13483 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13485 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13486 * we need to disable memory and use config. cycles
13487 * only to access all registers. The 5702/03 chips
13488 * can mistakenly decode the special cycles from the
13489 * ICH chipsets as memory write cycles, causing corruption
13490 * of register and memory space. Only certain ICH bridges
13491 * will drive special cycles with non-zero data during the
13492 * address phase which can fall within the 5703's address
13493 * range. This is not an ICH bug as the PCI spec allows
13494 * non-zero address during special cycles. However, only
13495 * these ICH bridges are known to drive non-zero addresses
13496 * during special cycles.
13498 * Since special cycles do not cross PCI bridges, we only
13499 * enable this workaround if the 5703 is on the secondary
13500 * bus of these ICH bridges.
13502 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13503 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13504 static struct tg3_dev_id {
13508 } ich_chipsets[] = {
13509 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13511 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13513 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13515 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13519 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13520 struct pci_dev *bridge = NULL;
13522 while (pci_id->vendor != 0) {
13523 bridge = pci_get_device(pci_id->vendor, pci_id->device,
13529 if (pci_id->rev != PCI_ANY_ID) {
13530 if (bridge->revision > pci_id->rev)
13533 if (bridge->subordinate &&
13534 (bridge->subordinate->number ==
13535 tp->pdev->bus->number)) {
13536 tg3_flag_set(tp, ICH_WORKAROUND);
13537 pci_dev_put(bridge);
13543 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13544 static struct tg3_dev_id {
13547 } bridge_chipsets[] = {
13548 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13549 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13552 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13553 struct pci_dev *bridge = NULL;
13555 while (pci_id->vendor != 0) {
13556 bridge = pci_get_device(pci_id->vendor,
13563 if (bridge->subordinate &&
13564 (bridge->subordinate->number <=
13565 tp->pdev->bus->number) &&
13566 (bridge->subordinate->subordinate >=
13567 tp->pdev->bus->number)) {
13568 tg3_flag_set(tp, 5701_DMA_BUG);
13569 pci_dev_put(bridge);
13575 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13576 * DMA addresses > 40-bit. This bridge may have other additional
13577 * 57xx devices behind it in some 4-port NIC designs for example.
13578 * Any tg3 device found behind the bridge will also need the 40-bit
13581 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13582 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13583 tg3_flag_set(tp, 5780_CLASS);
13584 tg3_flag_set(tp, 40BIT_DMA_BUG);
13585 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13587 struct pci_dev *bridge = NULL;
13590 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13591 PCI_DEVICE_ID_SERVERWORKS_EPB,
13593 if (bridge && bridge->subordinate &&
13594 (bridge->subordinate->number <=
13595 tp->pdev->bus->number) &&
13596 (bridge->subordinate->subordinate >=
13597 tp->pdev->bus->number)) {
13598 tg3_flag_set(tp, 40BIT_DMA_BUG);
13599 pci_dev_put(bridge);
13605 /* Initialize misc host control in PCI block. */
13606 tp->misc_host_ctrl |= (misc_ctrl_reg &
13607 MISC_HOST_CTRL_CHIPREV);
13608 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13609 tp->misc_host_ctrl);
13611 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13612 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13613 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13614 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13615 tp->pdev_peer = tg3_find_peer(tp);
13617 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13618 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13619 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13620 tg3_flag_set(tp, 5717_PLUS);
13622 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13623 tg3_flag(tp, 5717_PLUS))
13624 tg3_flag_set(tp, 57765_PLUS);
13626 /* Intentionally exclude ASIC_REV_5906 */
13627 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13628 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13629 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13630 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13631 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13632 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13633 tg3_flag(tp, 57765_PLUS))
13634 tg3_flag_set(tp, 5755_PLUS);
13636 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13637 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13638 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13639 tg3_flag(tp, 5755_PLUS) ||
13640 tg3_flag(tp, 5780_CLASS))
13641 tg3_flag_set(tp, 5750_PLUS);
13643 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13644 tg3_flag(tp, 5750_PLUS))
13645 tg3_flag_set(tp, 5705_PLUS);
13647 /* Determine TSO capabilities */
13648 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13649 ; /* Do nothing. HW bug. */
13650 else if (tg3_flag(tp, 57765_PLUS))
13651 tg3_flag_set(tp, HW_TSO_3);
13652 else if (tg3_flag(tp, 5755_PLUS) ||
13653 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13654 tg3_flag_set(tp, HW_TSO_2);
13655 else if (tg3_flag(tp, 5750_PLUS)) {
13656 tg3_flag_set(tp, HW_TSO_1);
13657 tg3_flag_set(tp, TSO_BUG);
13658 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13659 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13660 tg3_flag_clear(tp, TSO_BUG);
13661 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13662 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13663 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13664 tg3_flag_set(tp, TSO_BUG);
13665 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13666 tp->fw_needed = FIRMWARE_TG3TSO5;
13668 tp->fw_needed = FIRMWARE_TG3TSO;
13671 /* Selectively allow TSO based on operating conditions */
13672 if (tg3_flag(tp, HW_TSO_1) ||
13673 tg3_flag(tp, HW_TSO_2) ||
13674 tg3_flag(tp, HW_TSO_3) ||
13675 (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
13676 tg3_flag_set(tp, TSO_CAPABLE);
13678 tg3_flag_clear(tp, TSO_CAPABLE);
13679 tg3_flag_clear(tp, TSO_BUG);
13680 tp->fw_needed = NULL;
13683 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
13684 tp->fw_needed = FIRMWARE_TG3;
13688 if (tg3_flag(tp, 5750_PLUS)) {
13689 tg3_flag_set(tp, SUPPORT_MSI);
13690 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13691 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13692 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13693 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13694 tp->pdev_peer == tp->pdev))
13695 tg3_flag_clear(tp, SUPPORT_MSI);
13697 if (tg3_flag(tp, 5755_PLUS) ||
13698 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13699 tg3_flag_set(tp, 1SHOT_MSI);
13702 if (tg3_flag(tp, 57765_PLUS)) {
13703 tg3_flag_set(tp, SUPPORT_MSIX);
13704 tp->irq_max = TG3_IRQ_MAX_VECS;
13708 /* All chips can get confused if TX buffers
13709 * straddle the 4GB address boundary.
13711 tg3_flag_set(tp, 4G_DMA_BNDRY_BUG);
13713 if (tg3_flag(tp, 5755_PLUS))
13714 tg3_flag_set(tp, SHORT_DMA_BUG);
13716 tg3_flag_set(tp, 40BIT_DMA_LIMIT_BUG);
13718 if (tg3_flag(tp, 5717_PLUS))
13719 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13721 if (tg3_flag(tp, 57765_PLUS) &&
13722 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13723 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13725 if (!tg3_flag(tp, 5705_PLUS) ||
13726 tg3_flag(tp, 5780_CLASS) ||
13727 tg3_flag(tp, USE_JUMBO_BDFLAG))
13728 tg3_flag_set(tp, JUMBO_CAPABLE);
13730 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13733 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
13734 if (tp->pcie_cap != 0) {
13737 tg3_flag_set(tp, PCI_EXPRESS);
13739 tp->pcie_readrq = 4096;
13740 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13741 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13742 tp->pcie_readrq = 2048;
13744 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13746 pci_read_config_word(tp->pdev,
13747 tp->pcie_cap + PCI_EXP_LNKCTL,
13749 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13750 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13752 tg3_flag_clear(tp, HW_TSO_2);
13753 tg3_flag_clear(tp, TSO_CAPABLE);
13755 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13756 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13757 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13758 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13759 tg3_flag_set(tp, CLKREQ_BUG);
13760 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13761 tg3_flag_set(tp, L1PLLPD_EN);
13763 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13764 tg3_flag_set(tp, PCI_EXPRESS);
13765 } else if (!tg3_flag(tp, 5705_PLUS) ||
13766 tg3_flag(tp, 5780_CLASS)) {
13767 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13768 if (!tp->pcix_cap) {
13769 dev_err(&tp->pdev->dev,
13770 "Cannot find PCI-X capability, aborting\n");
13774 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13775 tg3_flag_set(tp, PCIX_MODE);
13778 /* If we have an AMD 762 or VIA K8T800 chipset, write
13779 * reordering to the mailbox registers done by the host
13780 * controller can cause major troubles. We read back from
13781 * every mailbox register write to force the writes to be
13782 * posted to the chip in order.
13784 if (pci_dev_present(tg3_write_reorder_chipsets) &&
13785 !tg3_flag(tp, PCI_EXPRESS))
13786 tg3_flag_set(tp, MBOX_WRITE_REORDER);
13788 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13789 &tp->pci_cacheline_sz);
13790 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13791 &tp->pci_lat_timer);
13792 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13793 tp->pci_lat_timer < 64) {
13794 tp->pci_lat_timer = 64;
13795 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13796 tp->pci_lat_timer);
13799 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13800 /* 5700 BX chips need to have their TX producer index
13801 * mailboxes written twice to workaround a bug.
13803 tg3_flag_set(tp, TXD_MBOX_HWBUG);
13805 /* If we are in PCI-X mode, enable register write workaround.
13807 * The workaround is to use indirect register accesses
13808 * for all chip writes not to mailbox registers.
13810 if (tg3_flag(tp, PCIX_MODE)) {
13813 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
13815 /* The chip can have it's power management PCI config
13816 * space registers clobbered due to this bug.
13817 * So explicitly force the chip into D0 here.
13819 pci_read_config_dword(tp->pdev,
13820 tp->pm_cap + PCI_PM_CTRL,
13822 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13823 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13824 pci_write_config_dword(tp->pdev,
13825 tp->pm_cap + PCI_PM_CTRL,
13828 /* Also, force SERR#/PERR# in PCI command. */
13829 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13830 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13831 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13835 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13836 tg3_flag_set(tp, PCI_HIGH_SPEED);
13837 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13838 tg3_flag_set(tp, PCI_32BIT);
13840 /* Chip-specific fixup from Broadcom driver */
13841 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13842 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13843 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13844 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13847 /* Default fast path register access methods */
13848 tp->read32 = tg3_read32;
13849 tp->write32 = tg3_write32;
13850 tp->read32_mbox = tg3_read32;
13851 tp->write32_mbox = tg3_write32;
13852 tp->write32_tx_mbox = tg3_write32;
13853 tp->write32_rx_mbox = tg3_write32;
13855 /* Various workaround register access methods */
13856 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
13857 tp->write32 = tg3_write_indirect_reg32;
13858 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13859 (tg3_flag(tp, PCI_EXPRESS) &&
13860 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13862 * Back to back register writes can cause problems on these
13863 * chips, the workaround is to read back all reg writes
13864 * except those to mailbox regs.
13866 * See tg3_write_indirect_reg32().
13868 tp->write32 = tg3_write_flush_reg32;
13871 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
13872 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13873 if (tg3_flag(tp, MBOX_WRITE_REORDER))
13874 tp->write32_rx_mbox = tg3_write_flush_reg32;
13877 if (tg3_flag(tp, ICH_WORKAROUND)) {
13878 tp->read32 = tg3_read_indirect_reg32;
13879 tp->write32 = tg3_write_indirect_reg32;
13880 tp->read32_mbox = tg3_read_indirect_mbox;
13881 tp->write32_mbox = tg3_write_indirect_mbox;
13882 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13883 tp->write32_rx_mbox = tg3_write_indirect_mbox;
13888 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13889 pci_cmd &= ~PCI_COMMAND_MEMORY;
13890 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13892 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13893 tp->read32_mbox = tg3_read32_mbox_5906;
13894 tp->write32_mbox = tg3_write32_mbox_5906;
13895 tp->write32_tx_mbox = tg3_write32_mbox_5906;
13896 tp->write32_rx_mbox = tg3_write32_mbox_5906;
13899 if (tp->write32 == tg3_write_indirect_reg32 ||
13900 (tg3_flag(tp, PCIX_MODE) &&
13901 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13902 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13903 tg3_flag_set(tp, SRAM_USE_CONFIG);
13905 /* Get eeprom hw config before calling tg3_set_power_state().
13906 * In particular, the TG3_FLAG_IS_NIC flag must be
13907 * determined before calling tg3_set_power_state() so that
13908 * we know whether or not to switch out of Vaux power.
13909 * When the flag is set, it means that GPIO1 is used for eeprom
13910 * write protect and also implies that it is a LOM where GPIOs
13911 * are not used to switch power.
13913 tg3_get_eeprom_hw_cfg(tp);
13915 if (tg3_flag(tp, ENABLE_APE)) {
13916 /* Allow reads and writes to the
13917 * APE register and memory space.
13919 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
13920 PCISTATE_ALLOW_APE_SHMEM_WR |
13921 PCISTATE_ALLOW_APE_PSPACE_WR;
13922 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
13926 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13927 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13928 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13929 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13930 tg3_flag(tp, 57765_PLUS))
13931 tg3_flag_set(tp, CPMU_PRESENT);
13933 /* Set up tp->grc_local_ctrl before calling tg3_power_up().
13934 * GPIO1 driven high will bring 5700's external PHY out of reset.
13935 * It is also used as eeprom write protect on LOMs.
13937 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
13938 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13939 tg3_flag(tp, EEPROM_WRITE_PROT))
13940 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
13941 GRC_LCLCTRL_GPIO_OUTPUT1);
13942 /* Unused GPIO3 must be driven as output on 5752 because there
13943 * are no pull-up resistors on unused GPIO pins.
13945 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13946 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
13948 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13949 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13950 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13951 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13953 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
13954 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
13955 /* Turn off the debug UART. */
13956 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13957 if (tg3_flag(tp, IS_NIC))
13958 /* Keep VMain power. */
13959 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
13960 GRC_LCLCTRL_GPIO_OUTPUT0;
13963 /* Force the chip into D0. */
13964 err = tg3_power_up(tp);
13966 dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
13970 /* Derive initial jumbo mode from MTU assigned in
13971 * ether_setup() via the alloc_etherdev() call
13973 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
13974 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13976 /* Determine WakeOnLan speed to use. */
13977 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13978 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
13979 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
13980 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
13981 tg3_flag_clear(tp, WOL_SPEED_100MB);
13983 tg3_flag_set(tp, WOL_SPEED_100MB);
13986 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13987 tp->phy_flags |= TG3_PHYFLG_IS_FET;
13989 /* A few boards don't want Ethernet@WireSpeed phy feature */
13990 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13991 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13992 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
13993 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
13994 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
13995 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13996 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
13998 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
13999 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14000 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14001 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14002 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14004 if (tg3_flag(tp, 5705_PLUS) &&
14005 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14006 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14007 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14008 !tg3_flag(tp, 57765_PLUS)) {
14009 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14010 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14011 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14012 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14013 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14014 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14015 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14016 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14017 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14019 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14022 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14023 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14024 tp->phy_otp = tg3_read_otp_phycfg(tp);
14025 if (tp->phy_otp == 0)
14026 tp->phy_otp = TG3_OTP_DEFAULT;
14029 if (tg3_flag(tp, CPMU_PRESENT))
14030 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14032 tp->mi_mode = MAC_MI_MODE_BASE;
14034 tp->coalesce_mode = 0;
14035 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14036 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14037 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14039 /* Set these bits to enable statistics workaround. */
14040 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14041 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14042 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14043 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14044 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14047 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14048 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14049 tg3_flag_set(tp, USE_PHYLIB);
14051 err = tg3_mdio_init(tp);
14055 /* Initialize data/descriptor byte/word swapping. */
14056 val = tr32(GRC_MODE);
14057 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14058 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14059 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14060 GRC_MODE_B2HRX_ENABLE |
14061 GRC_MODE_HTX2B_ENABLE |
14062 GRC_MODE_HOST_STACKUP);
14064 val &= GRC_MODE_HOST_STACKUP;
14066 tw32(GRC_MODE, val | tp->grc_mode);
14068 tg3_switch_clocks(tp);
14070 /* Clear this out for sanity. */
14071 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14073 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14075 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14076 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14077 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14079 if (chiprevid == CHIPREV_ID_5701_A0 ||
14080 chiprevid == CHIPREV_ID_5701_B0 ||
14081 chiprevid == CHIPREV_ID_5701_B2 ||
14082 chiprevid == CHIPREV_ID_5701_B5) {
14083 void __iomem *sram_base;
14085 /* Write some dummy words into the SRAM status block
14086 * area, see if it reads back correctly. If the return
14087 * value is bad, force enable the PCIX workaround.
14089 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14091 writel(0x00000000, sram_base);
14092 writel(0x00000000, sram_base + 4);
14093 writel(0xffffffff, sram_base + 4);
14094 if (readl(sram_base) != 0x00000000)
14095 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14100 tg3_nvram_init(tp);
14102 grc_misc_cfg = tr32(GRC_MISC_CFG);
14103 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14105 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14106 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14107 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14108 tg3_flag_set(tp, IS_5788);
14110 if (!tg3_flag(tp, IS_5788) &&
14111 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14112 tg3_flag_set(tp, TAGGED_STATUS);
14113 if (tg3_flag(tp, TAGGED_STATUS)) {
14114 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14115 HOSTCC_MODE_CLRTICK_TXBD);
14117 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14118 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14119 tp->misc_host_ctrl);
14122 /* Preserve the APE MAC_MODE bits */
14123 if (tg3_flag(tp, ENABLE_APE))
14124 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14126 tp->mac_mode = TG3_DEF_MAC_MODE;
14128 /* these are limited to 10/100 only */
14129 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14130 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14131 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14132 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14133 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14134 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14135 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14136 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14137 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14138 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14139 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14140 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14141 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14142 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14143 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14144 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14146 err = tg3_phy_probe(tp);
14148 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14149 /* ... but do not return immediately ... */
14154 tg3_read_fw_ver(tp);
14156 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14157 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14159 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14160 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14162 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14165 /* 5700 {AX,BX} chips have a broken status block link
14166 * change bit implementation, so we must use the
14167 * status register in those cases.
14169 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14170 tg3_flag_set(tp, USE_LINKCHG_REG);
14172 tg3_flag_clear(tp, USE_LINKCHG_REG);
14174 /* The led_ctrl is set during tg3_phy_probe, here we might
14175 * have to force the link status polling mechanism based
14176 * upon subsystem IDs.
14178 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14179 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14180 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14181 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14182 tg3_flag_set(tp, USE_LINKCHG_REG);
14185 /* For all SERDES we poll the MAC status register. */
14186 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14187 tg3_flag_set(tp, POLL_SERDES);
14189 tg3_flag_clear(tp, POLL_SERDES);
14191 tp->rx_offset = NET_IP_ALIGN;
14192 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14193 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14194 tg3_flag(tp, PCIX_MODE)) {
14196 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14197 tp->rx_copy_thresh = ~(u16)0;
14201 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14202 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14203 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14205 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14207 /* Increment the rx prod index on the rx std ring by at most
14208 * 8 for these chips to workaround hw errata.
14210 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14211 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14212 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14213 tp->rx_std_max_post = 8;
14215 if (tg3_flag(tp, ASPM_WORKAROUND))
14216 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14217 PCIE_PWR_MGMT_L1_THRESH_MSK;
14222 #ifdef CONFIG_SPARC
14223 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14225 struct net_device *dev = tp->dev;
14226 struct pci_dev *pdev = tp->pdev;
14227 struct device_node *dp = pci_device_to_OF_node(pdev);
14228 const unsigned char *addr;
14231 addr = of_get_property(dp, "local-mac-address", &len);
14232 if (addr && len == 6) {
14233 memcpy(dev->dev_addr, addr, 6);
14234 memcpy(dev->perm_addr, dev->dev_addr, 6);
14240 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14242 struct net_device *dev = tp->dev;
14244 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14245 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14250 static int __devinit tg3_get_device_address(struct tg3 *tp)
14252 struct net_device *dev = tp->dev;
14253 u32 hi, lo, mac_offset;
14256 #ifdef CONFIG_SPARC
14257 if (!tg3_get_macaddr_sparc(tp))
14262 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14263 tg3_flag(tp, 5780_CLASS)) {
14264 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14266 if (tg3_nvram_lock(tp))
14267 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14269 tg3_nvram_unlock(tp);
14270 } else if (tg3_flag(tp, 5717_PLUS)) {
14271 if (PCI_FUNC(tp->pdev->devfn) & 1)
14273 if (PCI_FUNC(tp->pdev->devfn) > 1)
14274 mac_offset += 0x18c;
14275 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14278 /* First try to get it from MAC address mailbox. */
14279 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14280 if ((hi >> 16) == 0x484b) {
14281 dev->dev_addr[0] = (hi >> 8) & 0xff;
14282 dev->dev_addr[1] = (hi >> 0) & 0xff;
14284 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14285 dev->dev_addr[2] = (lo >> 24) & 0xff;
14286 dev->dev_addr[3] = (lo >> 16) & 0xff;
14287 dev->dev_addr[4] = (lo >> 8) & 0xff;
14288 dev->dev_addr[5] = (lo >> 0) & 0xff;
14290 /* Some old bootcode may report a 0 MAC address in SRAM */
14291 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14294 /* Next, try NVRAM. */
14295 if (!tg3_flag(tp, NO_NVRAM) &&
14296 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14297 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14298 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14299 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14301 /* Finally just fetch it out of the MAC control regs. */
14303 hi = tr32(MAC_ADDR_0_HIGH);
14304 lo = tr32(MAC_ADDR_0_LOW);
14306 dev->dev_addr[5] = lo & 0xff;
14307 dev->dev_addr[4] = (lo >> 8) & 0xff;
14308 dev->dev_addr[3] = (lo >> 16) & 0xff;
14309 dev->dev_addr[2] = (lo >> 24) & 0xff;
14310 dev->dev_addr[1] = hi & 0xff;
14311 dev->dev_addr[0] = (hi >> 8) & 0xff;
14315 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14316 #ifdef CONFIG_SPARC
14317 if (!tg3_get_default_macaddr_sparc(tp))
14322 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14326 #define BOUNDARY_SINGLE_CACHELINE 1
14327 #define BOUNDARY_MULTI_CACHELINE 2
14329 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14331 int cacheline_size;
14335 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14337 cacheline_size = 1024;
14339 cacheline_size = (int) byte * 4;
14341 /* On 5703 and later chips, the boundary bits have no
14344 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14345 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14346 !tg3_flag(tp, PCI_EXPRESS))
14349 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14350 goal = BOUNDARY_MULTI_CACHELINE;
14352 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14353 goal = BOUNDARY_SINGLE_CACHELINE;
14359 if (tg3_flag(tp, 57765_PLUS)) {
14360 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14367 /* PCI controllers on most RISC systems tend to disconnect
14368 * when a device tries to burst across a cache-line boundary.
14369 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14371 * Unfortunately, for PCI-E there are only limited
14372 * write-side controls for this, and thus for reads
14373 * we will still get the disconnects. We'll also waste
14374 * these PCI cycles for both read and write for chips
14375 * other than 5700 and 5701 which do not implement the
14378 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14379 switch (cacheline_size) {
14384 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14385 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14386 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14388 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14389 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14394 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14395 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14399 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14400 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14403 } else if (tg3_flag(tp, PCI_EXPRESS)) {
14404 switch (cacheline_size) {
14408 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14409 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14410 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14416 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14417 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14421 switch (cacheline_size) {
14423 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14424 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14425 DMA_RWCTRL_WRITE_BNDRY_16);
14430 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14431 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14432 DMA_RWCTRL_WRITE_BNDRY_32);
14437 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14438 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14439 DMA_RWCTRL_WRITE_BNDRY_64);
14444 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14445 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14446 DMA_RWCTRL_WRITE_BNDRY_128);
14451 val |= (DMA_RWCTRL_READ_BNDRY_256 |
14452 DMA_RWCTRL_WRITE_BNDRY_256);
14455 val |= (DMA_RWCTRL_READ_BNDRY_512 |
14456 DMA_RWCTRL_WRITE_BNDRY_512);
14460 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14461 DMA_RWCTRL_WRITE_BNDRY_1024);
14470 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14472 struct tg3_internal_buffer_desc test_desc;
14473 u32 sram_dma_descs;
14476 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14478 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14479 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14480 tw32(RDMAC_STATUS, 0);
14481 tw32(WDMAC_STATUS, 0);
14483 tw32(BUFMGR_MODE, 0);
14484 tw32(FTQ_RESET, 0);
14486 test_desc.addr_hi = ((u64) buf_dma) >> 32;
14487 test_desc.addr_lo = buf_dma & 0xffffffff;
14488 test_desc.nic_mbuf = 0x00002100;
14489 test_desc.len = size;
14492 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
14493 * the *second* time the tg3 driver was getting loaded after an
14496 * Broadcom tells me:
14497 * ...the DMA engine is connected to the GRC block and a DMA
14498 * reset may affect the GRC block in some unpredictable way...
14499 * The behavior of resets to individual blocks has not been tested.
14501 * Broadcom noted the GRC reset will also reset all sub-components.
14504 test_desc.cqid_sqid = (13 << 8) | 2;
14506 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14509 test_desc.cqid_sqid = (16 << 8) | 7;
14511 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14514 test_desc.flags = 0x00000005;
14516 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14519 val = *(((u32 *)&test_desc) + i);
14520 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14521 sram_dma_descs + (i * sizeof(u32)));
14522 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14524 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14527 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14529 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14532 for (i = 0; i < 40; i++) {
14536 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14538 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14539 if ((val & 0xffff) == sram_dma_descs) {
14550 #define TEST_BUFFER_SIZE 0x2000
14552 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14553 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14557 static int __devinit tg3_test_dma(struct tg3 *tp)
14559 dma_addr_t buf_dma;
14560 u32 *buf, saved_dma_rwctrl;
14563 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14564 &buf_dma, GFP_KERNEL);
14570 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14571 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14573 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14575 if (tg3_flag(tp, 57765_PLUS))
14578 if (tg3_flag(tp, PCI_EXPRESS)) {
14579 /* DMA read watermark not used on PCIE */
14580 tp->dma_rwctrl |= 0x00180000;
14581 } else if (!tg3_flag(tp, PCIX_MODE)) {
14582 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14583 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14584 tp->dma_rwctrl |= 0x003f0000;
14586 tp->dma_rwctrl |= 0x003f000f;
14588 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14589 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14590 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14591 u32 read_water = 0x7;
14593 /* If the 5704 is behind the EPB bridge, we can
14594 * do the less restrictive ONE_DMA workaround for
14595 * better performance.
14597 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14598 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14599 tp->dma_rwctrl |= 0x8000;
14600 else if (ccval == 0x6 || ccval == 0x7)
14601 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14603 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14605 /* Set bit 23 to enable PCIX hw bug fix */
14607 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14608 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14610 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14611 /* 5780 always in PCIX mode */
14612 tp->dma_rwctrl |= 0x00144000;
14613 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14614 /* 5714 always in PCIX mode */
14615 tp->dma_rwctrl |= 0x00148000;
14617 tp->dma_rwctrl |= 0x001b000f;
14621 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14622 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14623 tp->dma_rwctrl &= 0xfffffff0;
14625 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14626 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14627 /* Remove this if it causes problems for some boards. */
14628 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14630 /* On 5700/5701 chips, we need to set this bit.
14631 * Otherwise the chip will issue cacheline transactions
14632 * to streamable DMA memory with not all the byte
14633 * enables turned on. This is an error on several
14634 * RISC PCI controllers, in particular sparc64.
14636 * On 5703/5704 chips, this bit has been reassigned
14637 * a different meaning. In particular, it is used
14638 * on those chips to enable a PCI-X workaround.
14640 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14643 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14646 /* Unneeded, already done by tg3_get_invariants. */
14647 tg3_switch_clocks(tp);
14650 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14651 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14654 /* It is best to perform DMA test with maximum write burst size
14655 * to expose the 5700/5701 write DMA bug.
14657 saved_dma_rwctrl = tp->dma_rwctrl;
14658 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14659 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14664 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14667 /* Send the buffer to the chip. */
14668 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14670 dev_err(&tp->pdev->dev,
14671 "%s: Buffer write failed. err = %d\n",
14677 /* validate data reached card RAM correctly. */
14678 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14680 tg3_read_mem(tp, 0x2100 + (i*4), &val);
14681 if (le32_to_cpu(val) != p[i]) {
14682 dev_err(&tp->pdev->dev,
14683 "%s: Buffer corrupted on device! "
14684 "(%d != %d)\n", __func__, val, i);
14685 /* ret = -ENODEV here? */
14690 /* Now read it back. */
14691 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14693 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14694 "err = %d\n", __func__, ret);
14699 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14703 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14704 DMA_RWCTRL_WRITE_BNDRY_16) {
14705 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14706 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14707 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14710 dev_err(&tp->pdev->dev,
14711 "%s: Buffer corrupted on read back! "
14712 "(%d != %d)\n", __func__, p[i], i);
14718 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14724 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14725 DMA_RWCTRL_WRITE_BNDRY_16) {
14726 /* DMA test passed without adjusting DMA boundary,
14727 * now look for chipsets that are known to expose the
14728 * DMA bug without failing the test.
14730 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14731 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14732 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14734 /* Safe to use the calculated DMA boundary. */
14735 tp->dma_rwctrl = saved_dma_rwctrl;
14738 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14742 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14747 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14749 if (tg3_flag(tp, 57765_PLUS)) {
14750 tp->bufmgr_config.mbuf_read_dma_low_water =
14751 DEFAULT_MB_RDMA_LOW_WATER_5705;
14752 tp->bufmgr_config.mbuf_mac_rx_low_water =
14753 DEFAULT_MB_MACRX_LOW_WATER_57765;
14754 tp->bufmgr_config.mbuf_high_water =
14755 DEFAULT_MB_HIGH_WATER_57765;
14757 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14758 DEFAULT_MB_RDMA_LOW_WATER_5705;
14759 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14760 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14761 tp->bufmgr_config.mbuf_high_water_jumbo =
14762 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14763 } else if (tg3_flag(tp, 5705_PLUS)) {
14764 tp->bufmgr_config.mbuf_read_dma_low_water =
14765 DEFAULT_MB_RDMA_LOW_WATER_5705;
14766 tp->bufmgr_config.mbuf_mac_rx_low_water =
14767 DEFAULT_MB_MACRX_LOW_WATER_5705;
14768 tp->bufmgr_config.mbuf_high_water =
14769 DEFAULT_MB_HIGH_WATER_5705;
14770 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14771 tp->bufmgr_config.mbuf_mac_rx_low_water =
14772 DEFAULT_MB_MACRX_LOW_WATER_5906;
14773 tp->bufmgr_config.mbuf_high_water =
14774 DEFAULT_MB_HIGH_WATER_5906;
14777 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14778 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14779 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14780 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14781 tp->bufmgr_config.mbuf_high_water_jumbo =
14782 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14784 tp->bufmgr_config.mbuf_read_dma_low_water =
14785 DEFAULT_MB_RDMA_LOW_WATER;
14786 tp->bufmgr_config.mbuf_mac_rx_low_water =
14787 DEFAULT_MB_MACRX_LOW_WATER;
14788 tp->bufmgr_config.mbuf_high_water =
14789 DEFAULT_MB_HIGH_WATER;
14791 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14792 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14793 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14794 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14795 tp->bufmgr_config.mbuf_high_water_jumbo =
14796 DEFAULT_MB_HIGH_WATER_JUMBO;
14799 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14800 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14803 static char * __devinit tg3_phy_string(struct tg3 *tp)
14805 switch (tp->phy_id & TG3_PHY_ID_MASK) {
14806 case TG3_PHY_ID_BCM5400: return "5400";
14807 case TG3_PHY_ID_BCM5401: return "5401";
14808 case TG3_PHY_ID_BCM5411: return "5411";
14809 case TG3_PHY_ID_BCM5701: return "5701";
14810 case TG3_PHY_ID_BCM5703: return "5703";
14811 case TG3_PHY_ID_BCM5704: return "5704";
14812 case TG3_PHY_ID_BCM5705: return "5705";
14813 case TG3_PHY_ID_BCM5750: return "5750";
14814 case TG3_PHY_ID_BCM5752: return "5752";
14815 case TG3_PHY_ID_BCM5714: return "5714";
14816 case TG3_PHY_ID_BCM5780: return "5780";
14817 case TG3_PHY_ID_BCM5755: return "5755";
14818 case TG3_PHY_ID_BCM5787: return "5787";
14819 case TG3_PHY_ID_BCM5784: return "5784";
14820 case TG3_PHY_ID_BCM5756: return "5722/5756";
14821 case TG3_PHY_ID_BCM5906: return "5906";
14822 case TG3_PHY_ID_BCM5761: return "5761";
14823 case TG3_PHY_ID_BCM5718C: return "5718C";
14824 case TG3_PHY_ID_BCM5718S: return "5718S";
14825 case TG3_PHY_ID_BCM57765: return "57765";
14826 case TG3_PHY_ID_BCM5719C: return "5719C";
14827 case TG3_PHY_ID_BCM5720C: return "5720C";
14828 case TG3_PHY_ID_BCM8002: return "8002/serdes";
14829 case 0: return "serdes";
14830 default: return "unknown";
14834 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14836 if (tg3_flag(tp, PCI_EXPRESS)) {
14837 strcpy(str, "PCI Express");
14839 } else if (tg3_flag(tp, PCIX_MODE)) {
14840 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14842 strcpy(str, "PCIX:");
14844 if ((clock_ctrl == 7) ||
14845 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
14846 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
14847 strcat(str, "133MHz");
14848 else if (clock_ctrl == 0)
14849 strcat(str, "33MHz");
14850 else if (clock_ctrl == 2)
14851 strcat(str, "50MHz");
14852 else if (clock_ctrl == 4)
14853 strcat(str, "66MHz");
14854 else if (clock_ctrl == 6)
14855 strcat(str, "100MHz");
14857 strcpy(str, "PCI:");
14858 if (tg3_flag(tp, PCI_HIGH_SPEED))
14859 strcat(str, "66MHz");
14861 strcat(str, "33MHz");
14863 if (tg3_flag(tp, PCI_32BIT))
14864 strcat(str, ":32-bit");
14866 strcat(str, ":64-bit");
14870 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14872 struct pci_dev *peer;
14873 unsigned int func, devnr = tp->pdev->devfn & ~7;
14875 for (func = 0; func < 8; func++) {
14876 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14877 if (peer && peer != tp->pdev)
14881 /* 5704 can be configured in single-port mode, set peer to
14882 * tp->pdev in that case.
14890 * We don't need to keep the refcount elevated; there's no way
14891 * to remove one half of this device without removing the other
14898 static void __devinit tg3_init_coal(struct tg3 *tp)
14900 struct ethtool_coalesce *ec = &tp->coal;
14902 memset(ec, 0, sizeof(*ec));
14903 ec->cmd = ETHTOOL_GCOALESCE;
14904 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
14905 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
14906 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
14907 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
14908 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
14909 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
14910 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
14911 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
14912 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
14914 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
14915 HOSTCC_MODE_CLRTICK_TXBD)) {
14916 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
14917 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
14918 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
14919 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
14922 if (tg3_flag(tp, 5705_PLUS)) {
14923 ec->rx_coalesce_usecs_irq = 0;
14924 ec->tx_coalesce_usecs_irq = 0;
14925 ec->stats_block_coalesce_usecs = 0;
14929 static const struct net_device_ops tg3_netdev_ops = {
14930 .ndo_open = tg3_open,
14931 .ndo_stop = tg3_close,
14932 .ndo_start_xmit = tg3_start_xmit,
14933 .ndo_get_stats64 = tg3_get_stats64,
14934 .ndo_validate_addr = eth_validate_addr,
14935 .ndo_set_multicast_list = tg3_set_rx_mode,
14936 .ndo_set_mac_address = tg3_set_mac_addr,
14937 .ndo_do_ioctl = tg3_ioctl,
14938 .ndo_tx_timeout = tg3_tx_timeout,
14939 .ndo_change_mtu = tg3_change_mtu,
14940 .ndo_fix_features = tg3_fix_features,
14941 .ndo_set_features = tg3_set_features,
14942 #ifdef CONFIG_NET_POLL_CONTROLLER
14943 .ndo_poll_controller = tg3_poll_controller,
14947 static int __devinit tg3_init_one(struct pci_dev *pdev,
14948 const struct pci_device_id *ent)
14950 struct net_device *dev;
14952 int i, err, pm_cap;
14953 u32 sndmbx, rcvmbx, intmbx;
14955 u64 dma_mask, persist_dma_mask;
14958 printk_once(KERN_INFO "%s\n", version);
14960 err = pci_enable_device(pdev);
14962 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
14966 err = pci_request_regions(pdev, DRV_MODULE_NAME);
14968 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
14969 goto err_out_disable_pdev;
14972 pci_set_master(pdev);
14974 /* Find power-management capability. */
14975 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
14977 dev_err(&pdev->dev,
14978 "Cannot find Power Management capability, aborting\n");
14980 goto err_out_free_res;
14983 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
14985 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
14987 goto err_out_free_res;
14990 SET_NETDEV_DEV(dev, &pdev->dev);
14992 tp = netdev_priv(dev);
14995 tp->pm_cap = pm_cap;
14996 tp->rx_mode = TG3_DEF_RX_MODE;
14997 tp->tx_mode = TG3_DEF_TX_MODE;
15000 tp->msg_enable = tg3_debug;
15002 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15004 /* The word/byte swap controls here control register access byte
15005 * swapping. DMA data byte swapping is controlled in the GRC_MODE
15008 tp->misc_host_ctrl =
15009 MISC_HOST_CTRL_MASK_PCI_INT |
15010 MISC_HOST_CTRL_WORD_SWAP |
15011 MISC_HOST_CTRL_INDIR_ACCESS |
15012 MISC_HOST_CTRL_PCISTATE_RW;
15014 /* The NONFRM (non-frame) byte/word swap controls take effect
15015 * on descriptor entries, anything which isn't packet data.
15017 * The StrongARM chips on the board (one for tx, one for rx)
15018 * are running in big-endian mode.
15020 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15021 GRC_MODE_WSWAP_NONFRM_DATA);
15022 #ifdef __BIG_ENDIAN
15023 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15025 spin_lock_init(&tp->lock);
15026 spin_lock_init(&tp->indirect_lock);
15027 INIT_WORK(&tp->reset_task, tg3_reset_task);
15029 tp->regs = pci_ioremap_bar(pdev, BAR_0);
15031 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15033 goto err_out_free_dev;
15036 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15037 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15039 dev->ethtool_ops = &tg3_ethtool_ops;
15040 dev->watchdog_timeo = TG3_TX_TIMEOUT;
15041 dev->netdev_ops = &tg3_netdev_ops;
15042 dev->irq = pdev->irq;
15044 err = tg3_get_invariants(tp);
15046 dev_err(&pdev->dev,
15047 "Problem fetching invariants of chip, aborting\n");
15048 goto err_out_iounmap;
15051 /* The EPB bridge inside 5714, 5715, and 5780 and any
15052 * device behind the EPB cannot support DMA addresses > 40-bit.
15053 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15054 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15055 * do DMA address check in tg3_start_xmit().
15057 if (tg3_flag(tp, IS_5788))
15058 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15059 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15060 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15061 #ifdef CONFIG_HIGHMEM
15062 dma_mask = DMA_BIT_MASK(64);
15065 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15067 /* Configure DMA attributes. */
15068 if (dma_mask > DMA_BIT_MASK(32)) {
15069 err = pci_set_dma_mask(pdev, dma_mask);
15071 features |= NETIF_F_HIGHDMA;
15072 err = pci_set_consistent_dma_mask(pdev,
15075 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15076 "DMA for consistent allocations\n");
15077 goto err_out_iounmap;
15081 if (err || dma_mask == DMA_BIT_MASK(32)) {
15082 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15084 dev_err(&pdev->dev,
15085 "No usable DMA configuration, aborting\n");
15086 goto err_out_iounmap;
15090 tg3_init_bufmgr_config(tp);
15092 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15094 /* 5700 B0 chips do not support checksumming correctly due
15095 * to hardware bugs.
15097 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15098 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15100 if (tg3_flag(tp, 5755_PLUS))
15101 features |= NETIF_F_IPV6_CSUM;
15104 /* TSO is on by default on chips that support hardware TSO.
15105 * Firmware TSO on older chips gives lower performance, so it
15106 * is off by default, but can be enabled using ethtool.
15108 if ((tg3_flag(tp, HW_TSO_1) ||
15109 tg3_flag(tp, HW_TSO_2) ||
15110 tg3_flag(tp, HW_TSO_3)) &&
15111 (features & NETIF_F_IP_CSUM))
15112 features |= NETIF_F_TSO;
15113 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15114 if (features & NETIF_F_IPV6_CSUM)
15115 features |= NETIF_F_TSO6;
15116 if (tg3_flag(tp, HW_TSO_3) ||
15117 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15118 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15119 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15120 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15121 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15122 features |= NETIF_F_TSO_ECN;
15125 dev->features |= features;
15126 dev->vlan_features |= features;
15129 * Add loopback capability only for a subset of devices that support
15130 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
15131 * loopback for the remaining devices.
15133 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15134 !tg3_flag(tp, CPMU_PRESENT))
15135 /* Add the loopback capability */
15136 features |= NETIF_F_LOOPBACK;
15138 dev->hw_features |= features;
15140 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15141 !tg3_flag(tp, TSO_CAPABLE) &&
15142 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15143 tg3_flag_set(tp, MAX_RXPEND_64);
15144 tp->rx_pending = 63;
15147 err = tg3_get_device_address(tp);
15149 dev_err(&pdev->dev,
15150 "Could not obtain valid ethernet address, aborting\n");
15151 goto err_out_iounmap;
15154 if (tg3_flag(tp, ENABLE_APE)) {
15155 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15156 if (!tp->aperegs) {
15157 dev_err(&pdev->dev,
15158 "Cannot map APE registers, aborting\n");
15160 goto err_out_iounmap;
15163 tg3_ape_lock_init(tp);
15165 if (tg3_flag(tp, ENABLE_ASF))
15166 tg3_read_dash_ver(tp);
15170 * Reset chip in case UNDI or EFI driver did not shutdown
15171 * DMA self test will enable WDMAC and we'll see (spurious)
15172 * pending DMA on the PCI bus at that point.
15174 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15175 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15176 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15177 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15180 err = tg3_test_dma(tp);
15182 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15183 goto err_out_apeunmap;
15186 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15187 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15188 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15189 for (i = 0; i < tp->irq_max; i++) {
15190 struct tg3_napi *tnapi = &tp->napi[i];
15193 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15195 tnapi->int_mbox = intmbx;
15201 tnapi->consmbox = rcvmbx;
15202 tnapi->prodmbox = sndmbx;
15205 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15207 tnapi->coal_now = HOSTCC_MODE_NOW;
15209 if (!tg3_flag(tp, SUPPORT_MSIX))
15213 * If we support MSIX, we'll be using RSS. If we're using
15214 * RSS, the first vector only handles link interrupts and the
15215 * remaining vectors handle rx and tx interrupts. Reuse the
15216 * mailbox values for the next iteration. The values we setup
15217 * above are still useful for the single vectored mode.
15232 pci_set_drvdata(pdev, dev);
15234 err = register_netdev(dev);
15236 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15237 goto err_out_apeunmap;
15240 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15241 tp->board_part_number,
15242 tp->pci_chip_rev_id,
15243 tg3_bus_string(tp, str),
15246 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15247 struct phy_device *phydev;
15248 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15250 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15251 phydev->drv->name, dev_name(&phydev->dev));
15255 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15256 ethtype = "10/100Base-TX";
15257 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15258 ethtype = "1000Base-SX";
15260 ethtype = "10/100/1000Base-T";
15262 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15263 "(WireSpeed[%d], EEE[%d])\n",
15264 tg3_phy_string(tp), ethtype,
15265 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15266 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15269 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15270 (dev->features & NETIF_F_RXCSUM) != 0,
15271 tg3_flag(tp, USE_LINKCHG_REG) != 0,
15272 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15273 tg3_flag(tp, ENABLE_ASF) != 0,
15274 tg3_flag(tp, TSO_CAPABLE) != 0);
15275 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15277 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15278 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15280 pci_save_state(pdev);
15286 iounmap(tp->aperegs);
15287 tp->aperegs = NULL;
15300 pci_release_regions(pdev);
15302 err_out_disable_pdev:
15303 pci_disable_device(pdev);
15304 pci_set_drvdata(pdev, NULL);
15308 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15310 struct net_device *dev = pci_get_drvdata(pdev);
15313 struct tg3 *tp = netdev_priv(dev);
15316 release_firmware(tp->fw);
15318 cancel_work_sync(&tp->reset_task);
15320 if (!tg3_flag(tp, USE_PHYLIB)) {
15325 unregister_netdev(dev);
15327 iounmap(tp->aperegs);
15328 tp->aperegs = NULL;
15335 pci_release_regions(pdev);
15336 pci_disable_device(pdev);
15337 pci_set_drvdata(pdev, NULL);
15341 #ifdef CONFIG_PM_SLEEP
15342 static int tg3_suspend(struct device *device)
15344 struct pci_dev *pdev = to_pci_dev(device);
15345 struct net_device *dev = pci_get_drvdata(pdev);
15346 struct tg3 *tp = netdev_priv(dev);
15349 if (!netif_running(dev))
15352 flush_work_sync(&tp->reset_task);
15354 tg3_netif_stop(tp);
15356 del_timer_sync(&tp->timer);
15358 tg3_full_lock(tp, 1);
15359 tg3_disable_ints(tp);
15360 tg3_full_unlock(tp);
15362 netif_device_detach(dev);
15364 tg3_full_lock(tp, 0);
15365 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15366 tg3_flag_clear(tp, INIT_COMPLETE);
15367 tg3_full_unlock(tp);
15369 err = tg3_power_down_prepare(tp);
15373 tg3_full_lock(tp, 0);
15375 tg3_flag_set(tp, INIT_COMPLETE);
15376 err2 = tg3_restart_hw(tp, 1);
15380 tp->timer.expires = jiffies + tp->timer_offset;
15381 add_timer(&tp->timer);
15383 netif_device_attach(dev);
15384 tg3_netif_start(tp);
15387 tg3_full_unlock(tp);
15396 static int tg3_resume(struct device *device)
15398 struct pci_dev *pdev = to_pci_dev(device);
15399 struct net_device *dev = pci_get_drvdata(pdev);
15400 struct tg3 *tp = netdev_priv(dev);
15403 if (!netif_running(dev))
15406 netif_device_attach(dev);
15408 tg3_full_lock(tp, 0);
15410 tg3_flag_set(tp, INIT_COMPLETE);
15411 err = tg3_restart_hw(tp, 1);
15415 tp->timer.expires = jiffies + tp->timer_offset;
15416 add_timer(&tp->timer);
15418 tg3_netif_start(tp);
15421 tg3_full_unlock(tp);
15429 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15430 #define TG3_PM_OPS (&tg3_pm_ops)
15434 #define TG3_PM_OPS NULL
15436 #endif /* CONFIG_PM_SLEEP */
15439 * tg3_io_error_detected - called when PCI error is detected
15440 * @pdev: Pointer to PCI device
15441 * @state: The current pci connection state
15443 * This function is called after a PCI bus error affecting
15444 * this device has been detected.
15446 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15447 pci_channel_state_t state)
15449 struct net_device *netdev = pci_get_drvdata(pdev);
15450 struct tg3 *tp = netdev_priv(netdev);
15451 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15453 netdev_info(netdev, "PCI I/O error detected\n");
15457 if (!netif_running(netdev))
15462 tg3_netif_stop(tp);
15464 del_timer_sync(&tp->timer);
15465 tg3_flag_clear(tp, RESTART_TIMER);
15467 /* Want to make sure that the reset task doesn't run */
15468 cancel_work_sync(&tp->reset_task);
15469 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15470 tg3_flag_clear(tp, RESTART_TIMER);
15472 netif_device_detach(netdev);
15474 /* Clean up software state, even if MMIO is blocked */
15475 tg3_full_lock(tp, 0);
15476 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15477 tg3_full_unlock(tp);
15480 if (state == pci_channel_io_perm_failure)
15481 err = PCI_ERS_RESULT_DISCONNECT;
15483 pci_disable_device(pdev);
15491 * tg3_io_slot_reset - called after the pci bus has been reset.
15492 * @pdev: Pointer to PCI device
15494 * Restart the card from scratch, as if from a cold-boot.
15495 * At this point, the card has exprienced a hard reset,
15496 * followed by fixups by BIOS, and has its config space
15497 * set up identically to what it was at cold boot.
15499 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15501 struct net_device *netdev = pci_get_drvdata(pdev);
15502 struct tg3 *tp = netdev_priv(netdev);
15503 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15508 if (pci_enable_device(pdev)) {
15509 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15513 pci_set_master(pdev);
15514 pci_restore_state(pdev);
15515 pci_save_state(pdev);
15517 if (!netif_running(netdev)) {
15518 rc = PCI_ERS_RESULT_RECOVERED;
15522 err = tg3_power_up(tp);
15524 netdev_err(netdev, "Failed to restore register access.\n");
15528 rc = PCI_ERS_RESULT_RECOVERED;
15537 * tg3_io_resume - called when traffic can start flowing again.
15538 * @pdev: Pointer to PCI device
15540 * This callback is called when the error recovery driver tells
15541 * us that its OK to resume normal operation.
15543 static void tg3_io_resume(struct pci_dev *pdev)
15545 struct net_device *netdev = pci_get_drvdata(pdev);
15546 struct tg3 *tp = netdev_priv(netdev);
15551 if (!netif_running(netdev))
15554 tg3_full_lock(tp, 0);
15555 tg3_flag_set(tp, INIT_COMPLETE);
15556 err = tg3_restart_hw(tp, 1);
15557 tg3_full_unlock(tp);
15559 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15563 netif_device_attach(netdev);
15565 tp->timer.expires = jiffies + tp->timer_offset;
15566 add_timer(&tp->timer);
15568 tg3_netif_start(tp);
15576 static struct pci_error_handlers tg3_err_handler = {
15577 .error_detected = tg3_io_error_detected,
15578 .slot_reset = tg3_io_slot_reset,
15579 .resume = tg3_io_resume
15582 static struct pci_driver tg3_driver = {
15583 .name = DRV_MODULE_NAME,
15584 .id_table = tg3_pci_tbl,
15585 .probe = tg3_init_one,
15586 .remove = __devexit_p(tg3_remove_one),
15587 .err_handler = &tg3_err_handler,
15588 .driver.pm = TG3_PM_OPS,
15591 static int __init tg3_init(void)
15593 return pci_register_driver(&tg3_driver);
15596 static void __exit tg3_cleanup(void)
15598 pci_unregister_driver(&tg3_driver);
15601 module_init(tg3_init);
15602 module_exit(tg3_cleanup);