2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2007 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
42 #include <net/checksum.h>
45 #include <asm/system.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
51 #include <asm/idprom.h>
/* NOTE(review): this extract has lines elided (e.g. the #else/#endif of the
 * conditional below and the TG3_DEF_MSG_ENABLE continuation are not visible);
 * comments describe only what is shown.
 */
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
/* Non-zero when 802.1Q VLAN support is compiled into the kernel. */
56 #define TG3_VLAN_TAG_USED 1
58 #define TG3_VLAN_TAG_USED 0
61 #define TG3_TSO_SUPPORT 1
/* Driver identity strings, used for log prefixes and MODULE_VERSION below. */
65 #define DRV_MODULE_NAME "tg3"
66 #define PFX DRV_MODULE_NAME ": "
67 #define DRV_MODULE_VERSION "3.91"
68 #define DRV_MODULE_RELDATE "April 18, 2008"
/* Default values programmed into the MAC/RX/TX mode registers. */
70 #define TG3_DEF_MAC_MODE 0
71 #define TG3_DEF_RX_MODE 0
72 #define TG3_DEF_TX_MODE 0
73 #define TG3_DEF_MSG_ENABLE \
83 /* length of time before we decide the hardware is borked,
84 * and dev->tx_timeout() should be called to fix the problem
86 #define TG3_TX_TIMEOUT (5 * HZ)
88 /* hardware minimum and maximum for a single frame's data payload */
89 #define TG3_MIN_MTU 60
90 #define TG3_MAX_MTU(tp) \
91 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
93 /* These numbers seem to be hard coded in the NIC firmware somehow.
94 * You can't change the ring sizes, but you can change where you place
95 * them in the NIC onboard memory.
97 #define TG3_RX_RING_SIZE 512
98 #define TG3_DEF_RX_RING_PENDING 200
99 #define TG3_RX_JUMBO_RING_SIZE 256
100 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
102 /* Do not place this n-ring entries value into the tp struct itself,
103 * we really want to expose these constants to GCC so that modulo et
104 * al. operations are done with shifts and masks instead of with
105 * hw multiply/modulo instructions. Another solution would be to
106 * replace things like '% foo' with '& (foo - 1)'.
108 #define TG3_RX_RCB_RING_SIZE(tp) \
109 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
111 #define TG3_TX_RING_SIZE 512
112 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
/* Byte sizes of the descriptor rings (some continuations elided here). */
114 #define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
116 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
117 TG3_RX_JUMBO_RING_SIZE)
118 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
119 TG3_RX_RCB_RING_SIZE(tp))
120 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
122 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
/* Packet buffer sizes: payload + RX alignment offset + slack. */
124 #define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
125 #define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
127 /* minimum number of free TX descriptors required to wake up TX process */
128 #define TG3_TX_WAKEUP_THRESH(tp) ((tp)->tx_pending / 4)
130 /* number of ETHTOOL_GSTATS u64's */
131 #define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
133 #define TG3_NUM_TEST 6
/* Version banner printed at probe time, plus standard module metadata. */
135 static char version[] __devinitdata =
136 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
138 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
139 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
140 MODULE_LICENSE("GPL");
141 MODULE_VERSION(DRV_MODULE_VERSION);
/* Bitmask of netif message categories; module parameter, default -1. */
143 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
144 module_param(tg3_debug, int, 0);
145 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
/* PCI vendor/device IDs this driver binds to: Broadcom Tigon3 family plus
 * SysKonnect, Altima and Apple OEM variants. Exported to the module loader
 * via MODULE_DEVICE_TABLE below.
 */
147 static struct pci_device_id tg3_pci_tbl[] = {
148 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
149 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
150 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
151 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
152 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
153 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
154 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
155 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
156 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
157 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
158 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
159 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
160 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
161 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
162 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
163 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
164 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
165 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
166 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
167 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
168 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
169 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
170 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
171 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
172 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
173 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
174 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
175 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
176 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
177 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
178 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
179 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
180 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
181 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
182 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
183 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
184 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
185 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
186 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
187 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
188 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
189 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
190 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
191 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
192 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
193 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
194 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
195 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
196 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
197 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
198 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
199 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
200 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
201 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
202 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
205 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
206 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
207 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
208 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
209 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
210 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
211 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
212 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
216 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
/* Names reported for ETHTOOL_GSTATS; order must match struct
 * tg3_ethtool_stats (TG3_NUM_STATS entries; some rows elided in this view).
 */
218 static const struct {
219 const char string[ETH_GSTRING_LEN];
220 } ethtool_stats_keys[TG3_NUM_STATS] = {
223 { "rx_ucast_packets" },
224 { "rx_mcast_packets" },
225 { "rx_bcast_packets" },
227 { "rx_align_errors" },
228 { "rx_xon_pause_rcvd" },
229 { "rx_xoff_pause_rcvd" },
230 { "rx_mac_ctrl_rcvd" },
231 { "rx_xoff_entered" },
232 { "rx_frame_too_long_errors" },
234 { "rx_undersize_packets" },
235 { "rx_in_length_errors" },
236 { "rx_out_length_errors" },
237 { "rx_64_or_less_octet_packets" },
238 { "rx_65_to_127_octet_packets" },
239 { "rx_128_to_255_octet_packets" },
240 { "rx_256_to_511_octet_packets" },
241 { "rx_512_to_1023_octet_packets" },
242 { "rx_1024_to_1522_octet_packets" },
243 { "rx_1523_to_2047_octet_packets" },
244 { "rx_2048_to_4095_octet_packets" },
245 { "rx_4096_to_8191_octet_packets" },
246 { "rx_8192_to_9022_octet_packets" },
253 { "tx_flow_control" },
255 { "tx_single_collisions" },
256 { "tx_mult_collisions" },
258 { "tx_excessive_collisions" },
259 { "tx_late_collisions" },
260 { "tx_collide_2times" },
261 { "tx_collide_3times" },
262 { "tx_collide_4times" },
263 { "tx_collide_5times" },
264 { "tx_collide_6times" },
265 { "tx_collide_7times" },
266 { "tx_collide_8times" },
267 { "tx_collide_9times" },
268 { "tx_collide_10times" },
269 { "tx_collide_11times" },
270 { "tx_collide_12times" },
271 { "tx_collide_13times" },
272 { "tx_collide_14times" },
273 { "tx_collide_15times" },
274 { "tx_ucast_packets" },
275 { "tx_mcast_packets" },
276 { "tx_bcast_packets" },
277 { "tx_carrier_sense_errors" },
281 { "dma_writeq_full" },
282 { "dma_write_prioq_full" },
286 { "rx_threshold_hit" },
288 { "dma_readq_full" },
289 { "dma_read_prioq_full" },
290 { "tx_comp_queue_full" },
292 { "ring_set_send_prod_index" },
293 { "ring_status_update" },
295 { "nic_avoided_irqs" },
296 { "nic_tx_threshold_hit" }
/* Names of the TG3_NUM_TEST ethtool self-tests; "online" tests may run with
 * traffic flowing, "offline" tests take the device down.
 */
299 static const struct {
300 const char string[ETH_GSTRING_LEN];
301 } ethtool_test_keys[TG3_NUM_TEST] = {
302 { "nvram test (online) " },
303 { "link test (online) " },
304 { "register test (offline)" },
305 { "memory test (offline)" },
306 { "loopback test (offline)" },
307 { "interrupt test (offline)" },
/* Plain MMIO register write (posted; no read-back flush). */
310 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
312 writel(val, tp->regs + off);
/* Plain MMIO register read. */
315 static u32 tg3_read32(struct tg3 *tp, u32 off)
317 return (readl(tp->regs + off));
/* MMIO write into the APE (management processor) register space. */
320 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
322 writel(val, tp->aperegs + off);
/* MMIO read from the APE (management processor) register space. */
325 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
327 return (readl(tp->aperegs + off));
/* Register write via the PCI config-space indirection window
 * (REG_BASE_ADDR selects the register, REG_DATA carries the value).
 * indirect_lock serializes the two-step config-space sequence.
 */
330 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
334 spin_lock_irqsave(&tp->indirect_lock, flags);
335 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
336 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
337 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* MMIO write followed by a read-back to flush the posted write. */
340 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
342 writel(val, tp->regs + off);
343 readl(tp->regs + off);
/* Register read via the PCI config-space indirection window; counterpart of
 * tg3_write_indirect_reg32. (The `return val;` line is elided in this view.)
 */
346 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
351 spin_lock_irqsave(&tp->indirect_lock, flags);
352 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
353 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
354 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* Mailbox write in indirect (config-space) mode. Two mailboxes have
 * dedicated config-space aliases and are special-cased; all others go
 * through the generic indirection window at offset +0x5600.
 */
358 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
362 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
363 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
364 TG3_64BIT_REG_LOW, val);
367 if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
368 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
369 TG3_64BIT_REG_LOW, val);
373 spin_lock_irqsave(&tp->indirect_lock, flags);
374 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
375 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
376 spin_unlock_irqrestore(&tp->indirect_lock, flags);
378 /* In indirect mode when disabling interrupts, we also need
379 * to clear the interrupt bit in the GRC local ctrl register.
381 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
383 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
384 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
/* Mailbox read via the config-space indirection window (+0x5600 alias).
 * (The `return val;` line is elided in this view.)
 */
388 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
393 spin_lock_irqsave(&tp->indirect_lock, flags);
394 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
395 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
396 spin_unlock_irqrestore(&tp->indirect_lock, flags);
400 /* usec_wait specifies the wait time in usec when writing to certain registers
401 * where it is unsafe to read back the register without some delay.
402 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
403 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
405 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
/* Chips with the PCI-X target or ICH workarounds must use the non-posted
 * (indirect) write method; everything else uses a direct MMIO write.
 */
407 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
408 (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
409 /* Non-posted methods */
410 tp->write32(tp, off, val);
413 tg3_write32(tp, off, val);
418 /* Wait again after the read for the posted method to guarantee that
419 * the wait time is met.
/* Mailbox write, followed by a read-back flush unless the chip either
 * reorders mailbox writes or needs the ICH workaround.
 */
425 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
427 tp->write32_mbox(tp, off, val);
428 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
429 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
430 tp->read32_mbox(tp, off);
/* TX mailbox write with workarounds for the TXD mailbox hardware bug and
 * for chips that reorder mailbox writes (action lines elided in this view).
 */
433 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
435 void __iomem *mbox = tp->regs + off;
437 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
439 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
/* 5906: mailboxes live in the GRC mailbox region, hence the extra base. */
443 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
445 return (readl(tp->regs + off + GRCMBOX_BASE));
/* 5906 counterpart of tg3_read32_mbox_5906 for mailbox writes. */
448 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
450 writel(val, tp->regs + off + GRCMBOX_BASE);
/* Convenience wrappers dispatching through the per-chip accessor function
 * pointers in struct tg3; the _f/_wait_f variants flush (and optionally
 * delay) via _tw32_flush.
 */
453 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
454 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
455 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
456 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
457 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
459 #define tw32(reg,val) tp->write32(tp, reg, val)
460 #define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
461 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
462 #define tr32(reg) tp->read32(tp, reg)
/* Write VAL into NIC on-board SRAM at OFF through the memory window,
 * either via config space (SRAM_USE_CONFIG) or MMIO. The 5906 skips the
 * stats-block range (guard's body elided in this view). The window base is
 * always restored to zero afterwards.
 */
464 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
468 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
469 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
472 spin_lock_irqsave(&tp->indirect_lock, flags);
473 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
474 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
475 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
477 /* Always leave this as zero. */
478 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
480 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
481 tw32_f(TG3PCI_MEM_WIN_DATA, val);
483 /* Always leave this as zero. */
484 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
486 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* Read a NIC SRAM word at OFF into *val; mirror image of tg3_write_mem,
 * with the same 5906 stats-block exclusion and window-base restore.
 */
489 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
493 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
494 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
499 spin_lock_irqsave(&tp->indirect_lock, flags);
500 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
501 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
502 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
504 /* Always leave this as zero. */
505 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
507 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
508 *val = tr32(TG3PCI_MEM_WIN_DATA);
510 /* Always leave this as zero. */
511 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
513 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* Release all 8 APE hardware locks at init time so none are left held
 * from a previous driver instance.
 */
516 static void tg3_ape_lock_init(struct tg3 *tp)
520 /* Make sure the driver hasn't any stale locks. */
521 for (i = 0; i < 8; i++)
522 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
523 APE_LOCK_GRANT_DRIVER)\u003b
/* Acquire APE hardware lock LOCKNUM: request it, then poll the grant
 * register for up to ~1 ms; on timeout revoke the request. No-op unless
 * the APE is enabled. (Delay and return lines are elided in this view.)
 */
526 static int tg3_ape_lock(struct tg3 *tp, int locknum)
532 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
536 case TG3_APE_LOCK_MEM:
544 tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
546 /* Wait for up to 1 millisecond to acquire lock. */
547 for (i = 0; i < 100; i++) {
548 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
549 if (status == APE_LOCK_GRANT_DRIVER)
554 if (status != APE_LOCK_GRANT_DRIVER) {
555 /* Revoke the lock request. */
556 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
557 APE_LOCK_GRANT_DRIVER);
/* Release APE hardware lock LOCKNUM by writing to its grant register;
 * no-op unless the APE is enabled.
 */
565 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
569 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
573 case TG3_APE_LOCK_MEM:
580 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
/* Mask PCI interrupts in MISC_HOST_CTRL and write 1 to the interrupt
 * mailbox to disable further interrupt delivery.
 */
583 static void tg3_disable_ints(struct tg3 *tp)
585 tw32(TG3PCI_MISC_HOST_CTRL,
586 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
587 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
/* If non-tagged status has a pending update, force an interrupt via GRC
 * SETINT; otherwise kick the coalescing engine (HOSTCC_MODE_NOW).
 */
590 static inline void tg3_cond_int(struct tg3 *tp)
592 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
593 (tp->hw_status->status & SD_STATUS_UPDATED))
594 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
596 tw32(HOSTCC_MODE, tp->coalesce_mode |
597 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
600 static void tg3_enable_ints(struct tg3 *tp)
605 tw32(TG3PCI_MISC_HOST_CTRL,
606 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
607 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
608 (tp->last_tag << 24));
609 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
610 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
611 (tp->last_tag << 24));
/* Return non-zero when the status block shows pending work: a link-change
 * event (unless link changes are polled elsewhere) or RX/TX ring indices
 * that have advanced past what the driver has processed.
 */
615 static inline unsigned int tg3_has_work(struct tg3 *tp)
617 struct tg3_hw_status *sblk = tp->hw_status;
618 unsigned int work_exists = 0;
620 /* check for phy events */
621 if (!(tp->tg3_flags &
622 (TG3_FLAG_USE_LINKCHG_REG |
623 TG3_FLAG_POLL_SERDES))) {
624 if (sblk->status & SD_STATUS_LINK_CHG)
627 /* check for RX/TX work to do */
628 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
629 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
636 * similar to tg3_enable_ints, but it accurately determines whether there
637 * is new work pending and can return without flushing the PIO write
638 * which reenables interrupts
640 static void tg3_restart_ints(struct tg3 *tp)
642 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
646 /* When doing tagged status, this work check is unnecessary.
647 * The last_tag we write above tells the chip which piece of
648 * work we've completed.
650 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
/* Force the coalescing engine to fire now so pending work is not lost. */
652 tw32(HOSTCC_MODE, tp->coalesce_mode |
653 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
/* Quiesce the data path: refresh trans_start so the watchdog does not
 * fire, stop NAPI polling, then disable the TX queue.
 */
656 static inline void tg3_netif_stop(struct tg3 *tp)
658 tp->dev->trans_start = jiffies; /* prevent tx timeout */
659 napi_disable(&tp->napi);
660 netif_tx_disable(tp->dev);
/* Restart the data path after tg3_netif_stop: wake the TX queue, re-enable
 * NAPI, and mark the status block updated so pending events get processed.
 */
663 static inline void tg3_netif_start(struct tg3 *tp)
665 netif_wake_queue(tp->dev);
666 /* NOTE: unconditional netif_wake_queue is only appropriate
667 * so long as all callers are assured to have free tx slots
668 * (such as after tg3_init_hw)
670 napi_enable(&tp->napi);
671 tp->hw_status->status |= SD_STATUS_UPDATED;
/* Switch the core clock source/frequency via TG3PCI_CLOCK_CTRL, stepping
 * through ALTCLK as required on 5705+ parts. Skipped entirely on chips
 * with a CPMU or in the 5780 class (they manage clocks themselves).
 */
675 static void tg3_switch_clocks(struct tg3 *tp)
677 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
680 if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
681 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
684 orig_clock_ctrl = clock_ctrl;
685 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
686 CLOCK_CTRL_CLKRUN_OENABLE |
688 tp->pci_clock_ctrl = clock_ctrl;
690 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
691 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
692 tw32_wait_f(TG3PCI_CLOCK_CTRL,
693 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
695 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
696 tw32_wait_f(TG3PCI_CLOCK_CTRL,
698 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
700 tw32_wait_f(TG3PCI_CLOCK_CTRL,
701 clock_ctrl | (CLOCK_CTRL_ALTCLK),
704 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
/* Maximum polls of MI_COM_BUSY before an MDIO transaction is abandoned. */
707 #define PHY_BUSY_LOOPS 5000
/* Read PHY register REG over the MII management (MDIO) interface into
 * *val. Auto-polling is temporarily disabled around the transaction and
 * restored afterwards. (Loop braces and return are elided in this view.)
 */
709 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
715 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
717 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
/* Build the MI frame: PHY address, register, READ command, START. */
723 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
724 MI_COM_PHY_ADDR_MASK);
725 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
726 MI_COM_REG_ADDR_MASK);
727 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
729 tw32_f(MAC_MI_COM, frame_val);
731 loops = PHY_BUSY_LOOPS;
734 frame_val = tr32(MAC_MI_COM);
736 if ((frame_val & MI_COM_BUSY) == 0) {
738 frame_val = tr32(MAC_MI_COM);
746 *val = frame_val & MI_COM_DATA_MASK;
750 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
751 tw32_f(MAC_MI_MODE, tp->mi_mode);
/* Write VAL to PHY register REG over MDIO; mirror of tg3_readphy. On the
 * 5906 the MII_TG3_CTRL/AUX_CTRL registers are skipped entirely (guard
 * body elided in this view).
 */
758 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
764 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
765 (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
768 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
770 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
/* Build the MI frame: PHY address, register, data, WRITE command, START. */
774 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
775 MI_COM_PHY_ADDR_MASK);
776 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
777 MI_COM_REG_ADDR_MASK);
778 frame_val |= (val & MI_COM_DATA_MASK);
779 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
781 tw32_f(MAC_MI_COM, frame_val);
783 loops = PHY_BUSY_LOOPS;
786 frame_val = tr32(MAC_MI_COM);
787 if ((frame_val & MI_COM_BUSY) == 0) {
789 frame_val = tr32(MAC_MI_COM);
799 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
800 tw32_f(MAC_MI_MODE, tp->mi_mode);
/* Write VAL to PHY DSP register REG (two-step: address, then data port). */
807 static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
809 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
810 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
/* Enable/disable automatic MDI crossover. Only for 5705+ copper PHYs;
 * the 5906 EPHY uses shadow registers, other PHYs use the AUX_CTRL
 * misc shadow with write-enable.
 */
813 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
817 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
818 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
821 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
824 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
825 tg3_writephy(tp, MII_TG3_EPHY_TEST,
826 ephy | MII_TG3_EPHY_SHADOW_EN)\u003b
827 if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
829 phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
831 phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
832 tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
834 tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
837 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
838 MII_TG3_AUXCTL_SHDWSEL_MISC;
839 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
840 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
842 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
844 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
845 phy |= MII_TG3_AUXCTL_MISC_WREN;
846 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
/* Enable ethernet@wirespeed (bits 15 and 4 of the AUX_CTRL shadow read
 * back via selector 0x7007) unless the chip forbids it.
 */
851 static void tg3_phy_set_wirespeed(struct tg3 *tp)
855 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
858 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
859 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
860 tg3_writephy(tp, MII_TG3_AUX_CTRL,
861 (val | (1 << 15) | (1 << 4)));
/* Issue a PHY soft reset through BMCR and poll until the self-clearing
 * BMCR_RESET bit goes low or the (elided) loop limit is reached.
 */
864 static int tg3_bmcr_reset(struct tg3 *tp)
869 /* OK, reset it, and poll the BMCR_RESET bit until it
870 * clears or we time out.
872 phy_control = BMCR_RESET;
873 err = tg3_writephy(tp, MII_BMCR, phy_control);
879 err = tg3_readphy(tp, MII_BMCR, &phy_control);
883 if ((phy_control & BMCR_RESET) == 0) {
/* Program PHY DSP tuning values extracted from the chip's OTP word
 * (`otp`, read in elided lines) into the TAP1/AADJ/EXP DSP registers,
 * bracketed by enabling and then disabling the SM_DSP clock.
 */
895 static void tg3_phy_apply_otp(struct tg3 *tp)
904 /* Enable SM_DSP clock and tx 6dB coding. */
905 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
906 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
907 MII_TG3_AUXCTL_ACTL_TX_6DB;
908 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
910 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
911 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
912 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
914 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
915 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
916 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
918 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
919 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
920 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
922 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
923 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
925 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
926 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
928 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
929 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
930 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
932 /* Turn off SM_DSP clock. */
933 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
934 MII_TG3_AUXCTL_ACTL_TX_6DB;
935 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
/* Poll PHY register 0x16 until the macro-busy bit 0x1000 clears
 * (loop structure and return lines are elided in this view).
 */
938 static int tg3_wait_macro_done(struct tg3 *tp)
945 if (!tg3_readphy(tp, 0x16, &tmp32)) {
946 if ((tmp32 & 0x1000) == 0)
/* Write a fixed test pattern into each of the 4 PHY DSP channels, read it
 * back, and verify it. On mismatch, request a PHY reset via *resetp and
 * poke the recovery sequence (0x000b / 0x4001 / 0x4005).
 */
956 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
958 static const u32 test_pat[4][6] = {
959 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
960 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
961 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
962 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
966 for (chan = 0; chan < 4; chan++) {
969 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
970 (chan * 0x2000) | 0x0200);
971 tg3_writephy(tp, 0x16, 0x0002);
973 for (i = 0; i < 6; i++)
974 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
977 tg3_writephy(tp, 0x16, 0x0202);
978 if (tg3_wait_macro_done(tp)) {
983 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
984 (chan * 0x2000) | 0x0200);
985 tg3_writephy(tp, 0x16, 0x0082);
986 if (tg3_wait_macro_done(tp)) {
991 tg3_writephy(tp, 0x16, 0x0802);
992 if (tg3_wait_macro_done(tp)) {
997 for (i = 0; i < 6; i += 2) {
1000 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1001 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1002 tg3_wait_macro_done(tp)) {
1008 if (low != test_pat[chan][i] ||
1009 high != test_pat[chan][i+1]) {
1010 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1011 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1012 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
/* Zero out the test pattern in all 4 PHY DSP channels, waiting for the
 * macro to finish after each channel.
 */
1022 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1026 for (chan = 0; chan < 4; chan++) {
1029 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1030 (chan * 0x2000) | 0x0200);
1031 tg3_writephy(tp, 0x16, 0x0002);
1032 for (i = 0; i < 6; i++)
1033 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1034 tg3_writephy(tp, 0x16, 0x0202);
1035 if (tg3_wait_macro_done(tp))
/* PHY reset workaround for 5703/5704/5705: reset the PHY, force 1000/FD
 * master mode, run the DSP channel test pattern (retrying with a fresh
 * reset on failure), then restore the original MII_TG3_CTRL and
 * EXT_CTRL settings.
 *
 * FIX(review): two occurrences of "&reg32" had been corrupted into the
 * mojibake "\u00aeg32" (HTML entity &reg;); restored the address-of
 * expressions so tg3_readphy receives a valid u32 pointer. No other
 * token changed.
 */
1042 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1044 u32 reg32, phy9_orig;
1045 int retries, do_phy_reset, err;
1051 err = tg3_bmcr_reset(tp);
1057 /* Disable transmitter and interrupt. */
1058 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1062 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1064 /* Set full-duplex, 1000 mbps. */
1065 tg3_writephy(tp, MII_BMCR,
1066 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1068 /* Set to master mode. */
1069 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1072 tg3_writephy(tp, MII_TG3_CTRL,
1073 (MII_TG3_CTRL_AS_MASTER |
1074 MII_TG3_CTRL_ENABLE_AS_MASTER));
1076 /* Enable SM_DSP_CLOCK and 6dB. */
1077 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1079 /* Block the PHY control access. */
1080 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1081 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1083 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1086 } while (--retries);
1088 err = tg3_phy_reset_chanpat(tp);
1092 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1093 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1095 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1096 tg3_writephy(tp, 0x16, 0x0000);
1098 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1099 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1100 /* Set Extended packet length bit for jumbo frames */
1101 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1104 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1107 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1109 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1111 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1118 static void tg3_link_report(struct tg3 *);
1120 /* This will reset the tigon3 PHY if there is no valid
1121 * link unless the FORCE argument is non-zero.
1123 static int tg3_phy_reset(struct tg3 *tp)
/* 5906: take the EPHY out of IDDQ power-down before touching it. */
1129 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1132 val = tr32(GRC_MISC_CFG);
1133 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
/* BMSR is read twice: it is latched-low, so the first read clears
 * stale latched bits.
 */
1136 err = tg3_readphy(tp, MII_BMSR, &phy_status);
1137 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1141 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1142 netif_carrier_off(tp->dev);
1143 tg3_link_report(tp);
/* 5703/5704/5705 need the dedicated DSP-testpattern reset sequence. */
1146 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1147 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1148 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1149 err = tg3_phy_reset_5703_4_5(tp);
/* 5784 (non-AX): temporarily clear the CPMU 10MB-RX-only mode around
 * the BMCR reset, restoring it afterwards with a DSP workaround.
 */
1156 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1157 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1158 cpmuctrl = tr32(TG3_CPMU_CTRL);
1159 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1161 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1164 err = tg3_bmcr_reset(tp);
1168 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1171 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1172 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1174 tw32(TG3_CPMU_CTRL, cpmuctrl);
1177 if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
1180 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1181 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1182 CPMU_LSPD_1000MB_MACCLK_12_5) {
1183 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1185 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1188 /* Disable GPHY autopowerdown. */
1189 tg3_writephy(tp, MII_TG3_MISC_SHDW,
1190 MII_TG3_MISC_SHDW_WREN |
1191 MII_TG3_MISC_SHDW_APD_SEL |
1192 MII_TG3_MISC_SHDW_APD_WKTM_84MS);
1195 tg3_phy_apply_otp(tp);
/* Chip-specific PHY errata: each sequence writes magic DSP values
 * prescribed by the vendor for the given bug flag.
 */
1198 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1199 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1200 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1201 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1202 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1203 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1204 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1206 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1207 tg3_writephy(tp, 0x1c, 0x8d68);
1208 tg3_writephy(tp, 0x1c, 0x8d68);
1210 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1211 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1212 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1213 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1214 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1215 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1216 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1217 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1218 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1220 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1221 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1222 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1223 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1224 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1225 tg3_writephy(tp, MII_TG3_TEST1,
1226 MII_TG3_TEST1_TRIM_EN | 0x4);
1228 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1229 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1231 /* Set Extended packet length bit (bit 14) on all chips that */
1232 /* support jumbo frames */
1233 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1234 /* Cannot do read-modify-write on 5401 */
1235 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1236 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1239 /* Set bit 14 with read-modify-write to preserve other bits */
1240 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1241 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1242 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1245 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1246 * jumbo frames transmission.
1248 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1251 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1252 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1253 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1256 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1257 /* adjust output voltage */
1258 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
1261 tg3_phy_toggle_automdix(tp, 1);
1262 tg3_phy_set_wirespeed(tp);
/*
 * tg3_frob_aux_power() - manage auxiliary (Vaux) power via GRC GPIO pins.
 * On dual-port ASICs (5704/5714) the peer port's state is consulted so aux
 * power is kept up while either port still needs it (WOL or ASF enabled).
 * NOTE(review): this listing has gaps (early returns / braces elided), so
 * the full control flow cannot be verified from this excerpt alone.
 */
1266 static void tg3_frob_aux_power(struct tg3 *tp)
1268 struct tg3 *tp_peer = tp;
/* Only boards flagged as real NICs manage aux power here. */
1270 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1273 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1274 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1275 struct net_device *dev_peer;
1277 dev_peer = pci_get_drvdata(tp->pdev_peer);
1278 /* remove_one() may have been run on the peer. */
1282 tp_peer = netdev_priv(dev_peer);
/* Keep aux power on if either port has WOL or ASF enabled. */
1285 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1286 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1287 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1288 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1289 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1290 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1291 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1292 (GRC_LCLCTRL_GPIO_OE0 |
1293 GRC_LCLCTRL_GPIO_OE1 |
1294 GRC_LCLCTRL_GPIO_OE2 |
1295 GRC_LCLCTRL_GPIO_OUTPUT0 |
1296 GRC_LCLCTRL_GPIO_OUTPUT1),
1300 u32 grc_local_ctrl = 0;
1302 if (tp_peer != tp &&
1303 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1306 /* Workaround to prevent overdrawing Amps. */
1307 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1309 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1310 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1311 grc_local_ctrl, 100);
1314 /* On 5753 and variants, GPIO2 cannot be used. */
1315 no_gpio2 = tp->nic_sram_data_cfg &
1316 NIC_SRAM_DATA_CFG_NO_GPIO2;
1318 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1319 GRC_LCLCTRL_GPIO_OE1 |
1320 GRC_LCLCTRL_GPIO_OE2 |
1321 GRC_LCLCTRL_GPIO_OUTPUT1 |
1322 GRC_LCLCTRL_GPIO_OUTPUT2;
1324 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1325 GRC_LCLCTRL_GPIO_OUTPUT2);
/* Staged GPIO writes with 100us settle between each step. */
1327 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1328 grc_local_ctrl, 100);
1330 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1332 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1333 grc_local_ctrl, 100);
1336 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1337 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1338 grc_local_ctrl, 100);
/* Neither port needs aux power: drive GPIO1 to drop Vaux (not on 5700/5701). */
1342 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1343 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1344 if (tp_peer != tp &&
1345 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1348 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1349 (GRC_LCLCTRL_GPIO_OE1 |
1350 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1352 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1353 GRC_LCLCTRL_GPIO_OE1, 100);
1355 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1356 (GRC_LCLCTRL_GPIO_OE1 |
1357 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
/*
 * tg3_5700_link_polarity() - decide whether MAC_MODE_LINK_POLARITY should
 * be set for a 5700-family link, based on the LED mode, the PHY type
 * (BCM5411 is special-cased) and the link speed.
 * NOTE(review): the return statements are elided in this listing; the
 * boolean result for each branch must be confirmed against the full source.
 */
1362 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1364 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1366 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1367 if (speed != SPEED_10)
1369 } else if (speed == SPEED_10)
/*
 * Forward declarations and reset-kind codes used by the power management
 * paths below (tg3_set_power_state and friends) before the corresponding
 * definitions appear later in the file.
 */
1375 static int tg3_setup_phy(struct tg3 *, int);
/* Reset kinds passed to tg3_write_sig_post_reset() and similar helpers. */
1377 #define RESET_KIND_SHUTDOWN 0
1378 #define RESET_KIND_INIT 1
1379 #define RESET_KIND_SUSPEND 2
1381 static void tg3_write_sig_post_reset(struct tg3 *, int);
1382 static int tg3_halt_cpu(struct tg3 *, u32);
1383 static int tg3_nvram_lock(struct tg3 *);
1384 static void tg3_nvram_unlock(struct tg3 *);
/*
 * tg3_power_down_phy() - put the PHY into its lowest power state, with
 * per-ASIC special handling (5704 serdes, 5906 EPHY IDDQ, 5761/5784 AX
 * MAC clock), ending in BMCR power-down for the common case.
 * NOTE(review): several early-return paths are elided in this listing.
 */
1386 static void tg3_power_down_phy(struct tg3 *tp)
/* Serdes (fiber) PHYs: 5704 needs SG_DIG/SERDES_CFG teardown instead. */
1390 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1391 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1392 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
1393 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
1396 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
1397 tw32(SG_DIG_CTRL, sg_dig_ctrl);
1398 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
/* 5906: enter EPHY IDDQ mode via GRC_MISC_CFG instead of BMCR. */
1403 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1405 val = tr32(GRC_MISC_CFG);
1406 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1410 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1411 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1412 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1415 /* The PHY should not be powered down on some chips because
1418 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1419 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1420 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1421 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
/* 5761/5784 AX: force the 1000MB MAC clock to 12.5MHz before power down. */
1424 if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
1425 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1426 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1427 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
1428 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1431 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
/*
 * tg3_set_power_state() - transition the device to the requested PCI power
 * state: program PCI PM control, reconfigure the MAC/clocks for Wake-on-LAN
 * if enabled, power down the PHY when permitted, juggle aux power, and
 * finally commit the new D-state.
 * NOTE(review): this listing elides the switch over `state`, several early
 * returns and closing braces; control flow below is partial.
 */
1434 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1437 u16 power_control, power_caps;
1438 int pm = tp->pm_cap;
1440 /* Make sure register accesses (indirect or otherwise)
1441 * will function correctly.
1443 pci_write_config_dword(tp->pdev,
1444 TG3PCI_MISC_HOST_CTRL,
1445 tp->misc_host_ctrl);
1447 pci_read_config_word(tp->pdev,
/* Clear PME status (write-1-to-clear) and the D-state field. */
1450 power_control |= PCI_PM_CTRL_PME_STATUS;
1451 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1455 pci_write_config_word(tp->pdev,
1458 udelay(100);	/* Delay after power state change */
1460 /* Switch out of Vaux if it is a NIC */
1461 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
1462 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1479 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1481 tp->dev->name, state);
1485 power_control |= PCI_PM_CTRL_PME_ENABLE;
/* Mask PCI interrupts while the device is being put to sleep. */
1487 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1488 tw32(TG3PCI_MISC_HOST_CTRL,
1489 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
/* Save the current link config so it can be restored on resume. */
1491 if (tp->link_config.phy_is_low_power == 0) {
1492 tp->link_config.phy_is_low_power = 1;
1493 tp->link_config.orig_speed = tp->link_config.speed;
1494 tp->link_config.orig_duplex = tp->link_config.duplex;
1495 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1498 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1499 tp->link_config.speed = SPEED_10;
1500 tp->link_config.duplex = DUPLEX_HALF;
1501 tp->link_config.autoneg = AUTONEG_ENABLE;
1502 tg3_setup_phy(tp, 0);
1505 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1508 val = tr32(GRC_VCPU_EXT_CTRL);
1509 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
1510 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
/* Poll firmware mailbox (up to 200 iterations) for the magic ack. */
1514 for (i = 0; i < 200; i++) {
1515 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1516 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1521 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
1522 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1523 WOL_DRV_STATE_SHUTDOWN |
1527 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
/* WOL path: keep the MAC alive in MII/GMII mode for magic packets. */
1529 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1532 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1533 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1536 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1537 mac_mode = MAC_MODE_PORT_MODE_GMII;
1539 mac_mode = MAC_MODE_PORT_MODE_MII;
1541 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
1542 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1544 u32 speed = (tp->tg3_flags &
1545 TG3_FLAG_WOL_SPEED_100MB) ?
1546 SPEED_100 : SPEED_10;
1547 if (tg3_5700_link_polarity(tp, speed))
1548 mac_mode |= MAC_MODE_LINK_POLARITY;
1550 mac_mode &= ~MAC_MODE_LINK_POLARITY;
1553 mac_mode = MAC_MODE_PORT_MODE_TBI;
1556 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1557 tw32(MAC_LED_CTRL, tp->led_ctrl);
1559 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1560 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1561 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1563 tw32_f(MAC_MODE, mac_mode);
1566 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
/* Clock management: per-ASIC choices of which clocks to gate while down. */
1570 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1571 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1572 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1575 base_val = tp->pci_clock_ctrl;
1576 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1577 CLOCK_CTRL_TXCLK_DISABLE);
1579 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1580 CLOCK_CTRL_PWRDOWN_PLL133, 40);
1581 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1582 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
1583 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
1585 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1586 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1587 u32 newbits1, newbits2;
1589 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1590 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1591 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1592 CLOCK_CTRL_TXCLK_DISABLE |
1594 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1595 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1596 newbits1 = CLOCK_CTRL_625_CORE;
1597 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1599 newbits1 = CLOCK_CTRL_ALTCLK;
1600 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1603 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1606 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1609 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1612 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1613 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1614 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1615 CLOCK_CTRL_TXCLK_DISABLE |
1616 CLOCK_CTRL_44MHZ_CORE);
1618 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1621 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1622 tp->pci_clock_ctrl | newbits3, 40);
/* PHY may only be powered down when neither WOL, ASF nor APE needs it. */
1626 if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1627 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
1628 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
1629 tg3_power_down_phy(tp);
1631 tg3_frob_aux_power(tp);
1633 /* Workaround for unstable PLL clock */
1634 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1635 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1636 u32 val = tr32(0x7d00);
1638 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1640 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1643 err = tg3_nvram_lock(tp);
1644 tg3_halt_cpu(tp, RX_CPU_BASE);
1646 tg3_nvram_unlock(tp);
1650 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1652 /* Finally, set the new power state. */
1653 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1654 udelay(100);	/* Delay after power state change */
/*
 * tg3_link_report() - log link state (gated by netif_msg_link): down, or
 * up with negotiated speed/duplex plus the resolved TX/RX flow control.
 */
1659 static void tg3_link_report(struct tg3 *tp)
1661 if (!netif_carrier_ok(tp->dev)) {
1662 if (netif_msg_link(tp))
1663 printk(KERN_INFO PFX "%s: Link is down.\n",
1665 } else if (netif_msg_link(tp)) {
1666 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1668 (tp->link_config.active_speed == SPEED_1000 ?
1670 (tp->link_config.active_speed == SPEED_100 ?
1672 (tp->link_config.active_duplex == DUPLEX_FULL ?
1675 printk(KERN_INFO PFX
1676 "%s: Flow control is %s for TX and %s for RX.\n",
1678 (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
1680 (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
/*
 * tg3_advert_flowctrl_1000T() - map driver TX/RX pause flags to the MII
 * advertisement bits (ADVERTISE_PAUSE_CAP/ASYM) for copper 1000BASE-T.
 * TX+RX -> symmetric pause; TX only -> asym; RX only -> cap|asym.
 */
1685 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1689 if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1690 miireg = ADVERTISE_PAUSE_CAP;
1691 else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1692 miireg = ADVERTISE_PAUSE_ASYM;
1693 else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1694 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
/*
 * tg3_advert_flowctrl_1000X() - same mapping as the 1000T variant but for
 * the 1000BASE-X (fiber/serdes) pause advertisement bits.
 */
1701 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1705 if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1706 miireg = ADVERTISE_1000XPAUSE;
1707 else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1708 miireg = ADVERTISE_1000XPSE_ASYM;
1709 else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1710 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
/*
 * tg3_resolve_flowctrl_1000T() - resolve local vs link-partner pause
 * advertisements (copper) into the effective TG3_FLOW_CTRL_TX/RX flags,
 * following the standard 802.3 pause resolution table.
 */
1717 static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1721 if (lcladv & ADVERTISE_PAUSE_CAP) {
1722 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1723 if (rmtadv & LPA_PAUSE_CAP)
1724 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1725 else if (rmtadv & LPA_PAUSE_ASYM)
1726 cap = TG3_FLOW_CTRL_RX;
1728 if (rmtadv & LPA_PAUSE_CAP)
1729 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1731 } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
/* TX-only pause requires the partner to advertise both CAP and ASYM. */
1732 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1733 cap = TG3_FLOW_CTRL_TX;
/*
 * tg3_resolve_flowctrl_1000X() - pause resolution identical in structure
 * to the 1000T version, but using the 1000BASE-X advertisement/LPA bits.
 */
1739 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1743 if (lcladv & ADVERTISE_1000XPAUSE) {
1744 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1745 if (rmtadv & LPA_1000XPAUSE)
1746 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1747 else if (rmtadv & LPA_1000XPAUSE_ASYM)
1748 cap = TG3_FLOW_CTRL_RX;
1750 if (rmtadv & LPA_1000XPAUSE)
1751 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1753 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1754 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1755 cap = TG3_FLOW_CTRL_TX;
/*
 * tg3_setup_flow_control() - compute the active flow-control setting
 * (autoneg result or forced config) and program RX/TX mode registers,
 * writing hardware only when the mode word actually changed.
 */
1761 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1763 u8 new_tg3_flags = 0;
1764 u32 old_rx_mode = tp->rx_mode;
1765 u32 old_tx_mode = tp->tx_mode;
/* With pause autoneg on, resolve from advertisements (serdes vs copper). */
1767 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1768 (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1769 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1770 new_tg3_flags = tg3_resolve_flowctrl_1000X(local_adv,
1773 new_tg3_flags = tg3_resolve_flowctrl_1000T(local_adv,
1776 new_tg3_flags = tp->link_config.flowctrl;
1779 tp->link_config.active_flowctrl = new_tg3_flags;
1781 if (new_tg3_flags & TG3_FLOW_CTRL_RX)
1782 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1784 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1786 if (old_rx_mode != tp->rx_mode) {
1787 tw32_f(MAC_RX_MODE, tp->rx_mode);
1790 if (new_tg3_flags & TG3_FLOW_CTRL_TX)
1791 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1793 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1795 if (old_tx_mode != tp->tx_mode) {
1796 tw32_f(MAC_TX_MODE, tp->tx_mode);
/*
 * tg3_aux_stat_to_speed_duplex() - decode the PHY AUX_STAT speed/duplex
 * field into SPEED_*/DUPLEX_* out-parameters; the default case handles
 * the 5906 (which reports via separate bits) and otherwise yields
 * SPEED_INVALID/DUPLEX_INVALID.
 */
1800 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1802 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1803 case MII_TG3_AUX_STAT_10HALF:
1805 *duplex = DUPLEX_HALF;
1808 case MII_TG3_AUX_STAT_10FULL:
1810 *duplex = DUPLEX_FULL;
1813 case MII_TG3_AUX_STAT_100HALF:
1815 *duplex = DUPLEX_HALF;
1818 case MII_TG3_AUX_STAT_100FULL:
1820 *duplex = DUPLEX_FULL;
1823 case MII_TG3_AUX_STAT_1000HALF:
1824 *speed = SPEED_1000;
1825 *duplex = DUPLEX_HALF;
1828 case MII_TG3_AUX_STAT_1000FULL:
1829 *speed = SPEED_1000;
1830 *duplex = DUPLEX_FULL;
/* Fallback (default case): 5906 encodes speed/duplex in separate bits. */
1834 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1835 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1837 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1841 *speed = SPEED_INVALID;
1842 *duplex = DUPLEX_INVALID;
/*
 * tg3_phy_copper_begin() - program the copper PHY advertisement/BMCR for
 * the current link_config: low-power mode (10/100 only), full autoneg
 * advertisement, or a forced speed/duplex; finishes by (re)starting
 * autonegotiation or forcing BMCR.
 * NOTE(review): several branch bodies and closing braces are elided in
 * this listing.
 */
1847 static void tg3_phy_copper_begin(struct tg3 *tp)
1852 if (tp->link_config.phy_is_low_power) {
1853 /* Entering low power mode. Disable gigabit and
1854 * 100baseT advertisements.
1856 tg3_writephy(tp, MII_TG3_CTRL, 0);
1858 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1859 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1860 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1861 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1863 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1864 } else if (tp->link_config.speed == SPEED_INVALID) {
1865 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1866 tp->link_config.advertising &=
1867 ~(ADVERTISED_1000baseT_Half |
1868 ADVERTISED_1000baseT_Full);
/* Build the 10/100 advertisement word from link_config.advertising. */
1870 new_adv = ADVERTISE_CSMA;
1871 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1872 new_adv |= ADVERTISE_10HALF;
1873 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1874 new_adv |= ADVERTISE_10FULL;
1875 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1876 new_adv |= ADVERTISE_100HALF;
1877 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1878 new_adv |= ADVERTISE_100FULL;
1880 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
1882 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1884 if (tp->link_config.advertising &
1885 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1887 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1888 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1889 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1890 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
/* 5701 A0/B0 workaround: advertise as link master. */
1891 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1892 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1893 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1894 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1895 MII_TG3_CTRL_ENABLE_AS_MASTER);
1896 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1898 tg3_writephy(tp, MII_TG3_CTRL, 0);
1901 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
1902 new_adv |= ADVERTISE_CSMA;
1904 /* Asking for a specific link mode. */
1905 if (tp->link_config.speed == SPEED_1000) {
1906 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1908 if (tp->link_config.duplex == DUPLEX_FULL)
1909 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1911 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1912 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1913 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1914 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1915 MII_TG3_CTRL_ENABLE_AS_MASTER);
1917 if (tp->link_config.speed == SPEED_100) {
1918 if (tp->link_config.duplex == DUPLEX_FULL)
1919 new_adv |= ADVERTISE_100FULL;
1921 new_adv |= ADVERTISE_100HALF;
1923 if (tp->link_config.duplex == DUPLEX_FULL)
1924 new_adv |= ADVERTISE_10FULL;
1926 new_adv |= ADVERTISE_10HALF;
1928 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1933 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
/* Forced mode: spin waiting for link drop in loopback, then force BMCR. */
1936 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1937 tp->link_config.speed != SPEED_INVALID) {
1938 u32 bmcr, orig_bmcr;
1940 tp->link_config.active_speed = tp->link_config.speed;
1941 tp->link_config.active_duplex = tp->link_config.duplex;
1944 switch (tp->link_config.speed) {
1950 bmcr |= BMCR_SPEED100;
1954 bmcr |= TG3_BMCR_SPEED1000;
1958 if (tp->link_config.duplex == DUPLEX_FULL)
1959 bmcr |= BMCR_FULLDPLX;
1961 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1962 (bmcr != orig_bmcr)) {
1963 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1964 for (i = 0; i < 1500; i++) {
/* BMSR is read twice: latched-low link bit needs a double read. */
1968 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1969 tg3_readphy(tp, MII_BMSR, &tmp))
1971 if (!(tmp & BMSR_LSTATUS)) {
1976 tg3_writephy(tp, MII_BMCR, bmcr);
1980 tg3_writephy(tp, MII_BMCR,
1981 BMCR_ANENABLE | BMCR_ANRESTART);
/*
 * tg3_init_5401phy_dsp() - BCM5401 DSP setup: disable tap power management
 * and set the extended packet length bit via a fixed sequence of
 * DSP_ADDRESS/DSP_RW_PORT writes; errors are OR-accumulated in err.
 */
1985 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1989 /* Turn off tap power management. */
1990 /* Set Extended packet length bit */
1991 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1993 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1994 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1996 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1997 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1999 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2000 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2002 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2003 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2005 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2006 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
/*
 * tg3_copper_is_advertising_all() - check that the PHY's current
 * MII_ADVERTISE (and, unless 10/100-only, MII_TG3_CTRL gigabit)
 * registers cover every mode requested in `mask`.
 * NOTE(review): the return statements are elided in this listing;
 * returns presumably 0 on mismatch/read failure, nonzero on success.
 */
2013 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2015 u32 adv_reg, all_mask = 0;
2017 if (mask & ADVERTISED_10baseT_Half)
2018 all_mask |= ADVERTISE_10HALF;
2019 if (mask & ADVERTISED_10baseT_Full)
2020 all_mask |= ADVERTISE_10FULL;
2021 if (mask & ADVERTISED_100baseT_Half)
2022 all_mask |= ADVERTISE_100HALF;
2023 if (mask & ADVERTISED_100baseT_Full)
2024 all_mask |= ADVERTISE_100FULL;
2026 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2029 if ((adv_reg & all_mask) != all_mask)
/* Gigabit modes are checked via MII_TG3_CTRL unless 10/100-only. */
2031 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2035 if (mask & ADVERTISED_1000baseT_Half)
2036 all_mask |= ADVERTISE_1000HALF;
2037 if (mask & ADVERTISED_1000baseT_Full)
2038 all_mask |= ADVERTISE_1000FULL;
2040 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2043 if ((tg3_ctrl & all_mask) != all_mask)
/*
 * tg3_adv_1000T_flowctrl_ok() - verify the PHY's advertised pause bits
 * match the requested flow control; reads MII_ADVERTISE into *lcladv and
 * (on full duplex with pause autoneg) MII_LPA into *rmtadv. If the
 * advertisement is stale but the link is unaffected, rewrite it for the
 * next renegotiation rather than failing.
 */
2049 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2053 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2056 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2057 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2059 if (tp->link_config.active_duplex == DUPLEX_FULL) {
2060 if (curadv != reqadv)
2063 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2064 tg3_readphy(tp, MII_LPA, rmtadv);
2066 /* Reprogram the advertisement register, even if it
2067 * does not affect the current link. If the link
2068 * gets renegotiated in the future, we can save an
2069 * additional renegotiation cycle by advertising
2070 * it correctly in the first place.
2072 if (curadv != reqadv) {
2073 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2074 ADVERTISE_PAUSE_ASYM);
2075 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
/*
 * tg3_setup_copper_phy() - full copper link bring-up: clear MAC status,
 * apply per-chip PHY workarounds, poll BMSR/AUX_STAT for link, decide
 * current_link_up, program MAC mode to match the negotiated speed/duplex,
 * and report carrier changes.
 * NOTE(review): numerous lines (early returns, closing braces, some
 * assignments) are elided in this listing; flow below is partial.
 */
2082 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2084 int current_link_up;
2086 u32 lcl_adv, rmt_adv;
2094 (MAC_STATUS_SYNC_CHANGED |
2095 MAC_STATUS_CFG_CHANGED |
2096 MAC_STATUS_MI_COMPLETION |
2097 MAC_STATUS_LNKSTATE_CHANGED));
/* Temporarily stop MI auto-polling so manual MDIO access is safe. */
2100 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2102 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2106 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2108 /* Some third-party PHYs need to be reset on link going
2111 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2112 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2113 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2114 netif_carrier_ok(tp->dev)) {
2115 tg3_readphy(tp, MII_BMSR, &bmsr);
2116 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2117 !(bmsr & BMSR_LSTATUS))
/* BCM5401: re-init the DSP and wait for link after a reset. */
2123 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2124 tg3_readphy(tp, MII_BMSR, &bmsr);
2125 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2126 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2129 if (!(bmsr & BMSR_LSTATUS)) {
2130 err = tg3_init_5401phy_dsp(tp);
2134 tg3_readphy(tp, MII_BMSR, &bmsr);
2135 for (i = 0; i < 1000; i++) {
2137 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2138 (bmsr & BMSR_LSTATUS)) {
2144 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2145 !(bmsr & BMSR_LSTATUS) &&
2146 tp->link_config.active_speed == SPEED_1000) {
2147 err = tg3_phy_reset(tp);
2149 err = tg3_init_5401phy_dsp(tp);
2154 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2155 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2156 /* 5701 {A0,B0} CRC bug workaround */
2157 tg3_writephy(tp, 0x15, 0x0a75);
2158 tg3_writephy(tp, 0x1c, 0x8c68);
2159 tg3_writephy(tp, 0x1c, 0x8d68);
2160 tg3_writephy(tp, 0x1c, 0x8c68);
2163 /* Clear pending interrupts... */
2164 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2165 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2167 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2168 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG)
2169 else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
2170 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2172 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2173 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2174 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2175 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2176 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2178 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2181 current_link_up = 0;
2182 current_speed = SPEED_INVALID;
2183 current_duplex = DUPLEX_INVALID;
2185 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2188 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2189 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2190 if (!(val & (1 << 10))) {
2192 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
/* Double-read BMSR in a bounded loop: link bit is latched-low. */
2198 for (i = 0; i < 100; i++) {
2199 tg3_readphy(tp, MII_BMSR, &bmsr);
2200 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2201 (bmsr & BMSR_LSTATUS))
2206 if (bmsr & BMSR_LSTATUS) {
2209 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2210 for (i = 0; i < 2000; i++) {
2212 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2217 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2222 for (i = 0; i < 200; i++) {
2223 tg3_readphy(tp, MII_BMCR, &bmcr);
2224 if (tg3_readphy(tp, MII_BMCR, &bmcr))
2226 if (bmcr && bmcr != 0x7fff)
2234 tp->link_config.active_speed = current_speed;
2235 tp->link_config.active_duplex = current_duplex;
2237 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2238 if ((bmcr & BMCR_ANENABLE) &&
2239 tg3_copper_is_advertising_all(tp,
2240 tp->link_config.advertising)) {
2241 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
2243 current_link_up = 1;
2246 if (!(bmcr & BMCR_ANENABLE) &&
2247 tp->link_config.speed == current_speed &&
2248 tp->link_config.duplex == current_duplex &&
2249 tp->link_config.flowctrl ==
2250 tp->link_config.active_flowctrl) {
2251 current_link_up = 1;
2255 if (current_link_up == 1 &&
2256 tp->link_config.active_duplex == DUPLEX_FULL)
2257 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
/* No link (or low-power): restart negotiation and re-check BMSR. */
2261 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
2264 tg3_phy_copper_begin(tp);
2266 tg3_readphy(tp, MII_BMSR, &tmp);
2267 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2268 (tmp & BMSR_LSTATUS))
2269 current_link_up = 1;
/* Program MAC port mode/duplex to match the negotiated link. */
2272 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2273 if (current_link_up == 1) {
2274 if (tp->link_config.active_speed == SPEED_100 ||
2275 tp->link_config.active_speed == SPEED_10)
2276 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2278 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2280 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2282 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2283 if (tp->link_config.active_duplex == DUPLEX_HALF)
2284 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2286 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
2287 if (current_link_up == 1 &&
2288 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
2289 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2291 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2294 /* ??? Without this setting Netgear GA302T PHY does not
2295 * ??? send/receive packets...
2297 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2298 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2299 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2300 tw32_f(MAC_MI_MODE, tp->mi_mode);
2304 tw32_f(MAC_MODE, tp->mac_mode);
2307 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2308 /* Polled via timer. */
2309 tw32_f(MAC_EVENT, 0);
2311 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2315 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2316 current_link_up == 1 &&
2317 tp->link_config.active_speed == SPEED_1000 &&
2318 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2319 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2322 (MAC_STATUS_SYNC_CHANGED |
2323 MAC_STATUS_CFG_CHANGED));
2326 NIC_SRAM_FIRMWARE_MBOX,
2327 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2330 if (current_link_up != netif_carrier_ok(tp->dev)) {
2331 if (current_link_up)
2332 netif_carrier_on(tp->dev);
2334 netif_carrier_off(tp->dev);
2335 tg3_link_report(tp);
/*
 * struct tg3_fiber_aneginfo - software state machine for 1000BASE-X
 * (fiber) autonegotiation, driven by tg3_fiber_aneg_smachine(). Holds the
 * current state, MR_* status flags mirroring the 802.3 Clause 37
 * management variables, timing info, and the raw tx/rx config words.
 */
2341 struct tg3_fiber_aneginfo {
/* State machine states (see tg3_fiber_aneg_smachine's switch). */
2343 #define ANEG_STATE_UNKNOWN 0
2344 #define ANEG_STATE_AN_ENABLE 1
2345 #define ANEG_STATE_RESTART_INIT 2
2346 #define ANEG_STATE_RESTART 3
2347 #define ANEG_STATE_DISABLE_LINK_OK 4
2348 #define ANEG_STATE_ABILITY_DETECT_INIT 5
2349 #define ANEG_STATE_ABILITY_DETECT 6
2350 #define ANEG_STATE_ACK_DETECT_INIT 7
2351 #define ANEG_STATE_ACK_DETECT 8
2352 #define ANEG_STATE_COMPLETE_ACK_INIT 9
2353 #define ANEG_STATE_COMPLETE_ACK 10
2354 #define ANEG_STATE_IDLE_DETECT_INIT 11
2355 #define ANEG_STATE_IDLE_DETECT 12
2356 #define ANEG_STATE_LINK_OK 13
2357 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
2358 #define ANEG_STATE_NEXT_PAGE_WAIT 15
/* MR_* flags: management-register view of negotiation progress and
 * link-partner abilities decoded from the received config word. */
2361 #define MR_AN_ENABLE 0x00000001
2362 #define MR_RESTART_AN 0x00000002
2363 #define MR_AN_COMPLETE 0x00000004
2364 #define MR_PAGE_RX 0x00000008
2365 #define MR_NP_LOADED 0x00000010
2366 #define MR_TOGGLE_TX 0x00000020
2367 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
2368 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
2369 #define MR_LP_ADV_SYM_PAUSE 0x00000100
2370 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
2371 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2372 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2373 #define MR_LP_ADV_NEXT_PAGE 0x00001000
2374 #define MR_TOGGLE_RX 0x00002000
2375 #define MR_NP_RX 0x00004000
2377 #define MR_LINK_OK 0x80000000
2379 unsigned long link_time, cur_time;
2381 u32 ability_match_cfg;
2382 int ability_match_count;
2384 char ability_match, idle_match, ack_match;
2386 u32 txconfig, rxconfig;
/* ANEG_CFG_*: bit layout of the 16-bit tx/rx config (base page) word. */
2387 #define ANEG_CFG_NP 0x00000080
2388 #define ANEG_CFG_ACK 0x00000040
2389 #define ANEG_CFG_RF2 0x00000020
2390 #define ANEG_CFG_RF1 0x00000010
2391 #define ANEG_CFG_PS2 0x00000001
2392 #define ANEG_CFG_PS1 0x00008000
2393 #define ANEG_CFG_HD 0x00004000
2394 #define ANEG_CFG_FD 0x00002000
2395 #define ANEG_CFG_INVAL 0x00001f06
/* Return codes / settle time for tg3_fiber_aneg_smachine(). */
2400 #define ANEG_TIMER_ENAB 2
2401 #define ANEG_FAILED -1
2403 #define ANEG_STATE_SETTLE_TIME 10000
2405 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2406 struct tg3_fiber_aneginfo *ap)
2409 unsigned long delta;
2413 if (ap->state == ANEG_STATE_UNKNOWN) {
2417 ap->ability_match_cfg = 0;
2418 ap->ability_match_count = 0;
2419 ap->ability_match = 0;
2425 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2426 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2428 if (rx_cfg_reg != ap->ability_match_cfg) {
2429 ap->ability_match_cfg = rx_cfg_reg;
2430 ap->ability_match = 0;
2431 ap->ability_match_count = 0;
2433 if (++ap->ability_match_count > 1) {
2434 ap->ability_match = 1;
2435 ap->ability_match_cfg = rx_cfg_reg;
2438 if (rx_cfg_reg & ANEG_CFG_ACK)
2446 ap->ability_match_cfg = 0;
2447 ap->ability_match_count = 0;
2448 ap->ability_match = 0;
2454 ap->rxconfig = rx_cfg_reg;
2458 case ANEG_STATE_UNKNOWN:
2459 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2460 ap->state = ANEG_STATE_AN_ENABLE;
2463 case ANEG_STATE_AN_ENABLE:
2464 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2465 if (ap->flags & MR_AN_ENABLE) {
2468 ap->ability_match_cfg = 0;
2469 ap->ability_match_count = 0;
2470 ap->ability_match = 0;
2474 ap->state = ANEG_STATE_RESTART_INIT;
2476 ap->state = ANEG_STATE_DISABLE_LINK_OK;
2480 case ANEG_STATE_RESTART_INIT:
2481 ap->link_time = ap->cur_time;
2482 ap->flags &= ~(MR_NP_LOADED);
2484 tw32(MAC_TX_AUTO_NEG, 0);
2485 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2486 tw32_f(MAC_MODE, tp->mac_mode);
2489 ret = ANEG_TIMER_ENAB;
2490 ap->state = ANEG_STATE_RESTART;
2493 case ANEG_STATE_RESTART:
2494 delta = ap->cur_time - ap->link_time;
2495 if (delta > ANEG_STATE_SETTLE_TIME) {
2496 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2498 ret = ANEG_TIMER_ENAB;
2502 case ANEG_STATE_DISABLE_LINK_OK:
2506 case ANEG_STATE_ABILITY_DETECT_INIT:
2507 ap->flags &= ~(MR_TOGGLE_TX);
2508 ap->txconfig = ANEG_CFG_FD;
2509 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
2510 if (flowctrl & ADVERTISE_1000XPAUSE)
2511 ap->txconfig |= ANEG_CFG_PS1;
2512 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
2513 ap->txconfig |= ANEG_CFG_PS2;
2514 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2515 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2516 tw32_f(MAC_MODE, tp->mac_mode);
2519 ap->state = ANEG_STATE_ABILITY_DETECT;
2522 case ANEG_STATE_ABILITY_DETECT:
2523 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2524 ap->state = ANEG_STATE_ACK_DETECT_INIT;
2528 case ANEG_STATE_ACK_DETECT_INIT:
2529 ap->txconfig |= ANEG_CFG_ACK;
2530 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2531 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2532 tw32_f(MAC_MODE, tp->mac_mode);
2535 ap->state = ANEG_STATE_ACK_DETECT;
2538 case ANEG_STATE_ACK_DETECT:
2539 if (ap->ack_match != 0) {
2540 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2541 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2542 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2544 ap->state = ANEG_STATE_AN_ENABLE;
2546 } else if (ap->ability_match != 0 &&
2547 ap->rxconfig == 0) {
2548 ap->state = ANEG_STATE_AN_ENABLE;
2552 case ANEG_STATE_COMPLETE_ACK_INIT:
2553 if (ap->rxconfig & ANEG_CFG_INVAL) {
2557 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2558 MR_LP_ADV_HALF_DUPLEX |
2559 MR_LP_ADV_SYM_PAUSE |
2560 MR_LP_ADV_ASYM_PAUSE |
2561 MR_LP_ADV_REMOTE_FAULT1 |
2562 MR_LP_ADV_REMOTE_FAULT2 |
2563 MR_LP_ADV_NEXT_PAGE |
2566 if (ap->rxconfig & ANEG_CFG_FD)
2567 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2568 if (ap->rxconfig & ANEG_CFG_HD)
2569 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2570 if (ap->rxconfig & ANEG_CFG_PS1)
2571 ap->flags |= MR_LP_ADV_SYM_PAUSE;
2572 if (ap->rxconfig & ANEG_CFG_PS2)
2573 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2574 if (ap->rxconfig & ANEG_CFG_RF1)
2575 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2576 if (ap->rxconfig & ANEG_CFG_RF2)
2577 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2578 if (ap->rxconfig & ANEG_CFG_NP)
2579 ap->flags |= MR_LP_ADV_NEXT_PAGE;
2581 ap->link_time = ap->cur_time;
2583 ap->flags ^= (MR_TOGGLE_TX);
2584 if (ap->rxconfig & 0x0008)
2585 ap->flags |= MR_TOGGLE_RX;
2586 if (ap->rxconfig & ANEG_CFG_NP)
2587 ap->flags |= MR_NP_RX;
2588 ap->flags |= MR_PAGE_RX;
2590 ap->state = ANEG_STATE_COMPLETE_ACK;
2591 ret = ANEG_TIMER_ENAB;
2594 case ANEG_STATE_COMPLETE_ACK:
2595 if (ap->ability_match != 0 &&
2596 ap->rxconfig == 0) {
2597 ap->state = ANEG_STATE_AN_ENABLE;
2600 delta = ap->cur_time - ap->link_time;
2601 if (delta > ANEG_STATE_SETTLE_TIME) {
2602 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2603 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2605 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2606 !(ap->flags & MR_NP_RX)) {
2607 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2615 case ANEG_STATE_IDLE_DETECT_INIT:
2616 ap->link_time = ap->cur_time;
2617 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2618 tw32_f(MAC_MODE, tp->mac_mode);
2621 ap->state = ANEG_STATE_IDLE_DETECT;
2622 ret = ANEG_TIMER_ENAB;
2625 case ANEG_STATE_IDLE_DETECT:
2626 if (ap->ability_match != 0 &&
2627 ap->rxconfig == 0) {
2628 ap->state = ANEG_STATE_AN_ENABLE;
2631 delta = ap->cur_time - ap->link_time;
2632 if (delta > ANEG_STATE_SETTLE_TIME) {
2633 /* XXX another gem from the Broadcom driver :( */
2634 ap->state = ANEG_STATE_LINK_OK;
2638 case ANEG_STATE_LINK_OK:
2639 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2643 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2644 /* ??? unimplemented */
2647 case ANEG_STATE_NEXT_PAGE_WAIT:
2648 /* ??? unimplemented */
2659 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
2662 struct tg3_fiber_aneginfo aninfo;
2663 int status = ANEG_FAILED;
2667 tw32_f(MAC_TX_AUTO_NEG, 0);
2669 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2670 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2673 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2676 memset(&aninfo, 0, sizeof(aninfo));
2677 aninfo.flags |= MR_AN_ENABLE;
2678 aninfo.state = ANEG_STATE_UNKNOWN;
2679 aninfo.cur_time = 0;
2681 while (++tick < 195000) {
2682 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2683 if (status == ANEG_DONE || status == ANEG_FAILED)
2689 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2690 tw32_f(MAC_MODE, tp->mac_mode);
2693 *txflags = aninfo.txconfig;
2694 *rxflags = aninfo.flags;
2696 if (status == ANEG_DONE &&
2697 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2698 MR_LP_ADV_FULL_DUPLEX)))
2704 static void tg3_init_bcm8002(struct tg3 *tp)
2706 u32 mac_status = tr32(MAC_STATUS);
2709 /* Reset when initting first time or we have a link. */
2710 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2711 !(mac_status & MAC_STATUS_PCS_SYNCED))
2714 /* Set PLL lock range. */
2715 tg3_writephy(tp, 0x16, 0x8007);
2718 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2720 /* Wait for reset to complete. */
2721 /* XXX schedule_timeout() ... */
2722 for (i = 0; i < 500; i++)
2725 /* Config mode; select PMA/Ch 1 regs. */
2726 tg3_writephy(tp, 0x10, 0x8411);
2728 /* Enable auto-lock and comdet, select txclk for tx. */
2729 tg3_writephy(tp, 0x11, 0x0a10);
2731 tg3_writephy(tp, 0x18, 0x00a0);
2732 tg3_writephy(tp, 0x16, 0x41ff);
2734 /* Assert and deassert POR. */
2735 tg3_writephy(tp, 0x13, 0x0400);
2737 tg3_writephy(tp, 0x13, 0x0000);
2739 tg3_writephy(tp, 0x11, 0x0a50);
2741 tg3_writephy(tp, 0x11, 0x0a10);
2743 /* Wait for signal to stabilize */
2744 /* XXX schedule_timeout() ... */
2745 for (i = 0; i < 15000; i++)
2748 /* Deselect the channel register so we can read the PHYID
2751 tg3_writephy(tp, 0x10, 0x8011);
2754 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2757 u32 sg_dig_ctrl, sg_dig_status;
2758 u32 serdes_cfg, expected_sg_dig_ctrl;
2759 int workaround, port_a;
2760 int current_link_up;
2763 expected_sg_dig_ctrl = 0;
2766 current_link_up = 0;
2768 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2769 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2771 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2774 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2775 /* preserve bits 20-23 for voltage regulator */
2776 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2779 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2781 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2782 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
2784 u32 val = serdes_cfg;
2790 tw32_f(MAC_SERDES_CFG, val);
2793 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
2795 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2796 tg3_setup_flow_control(tp, 0, 0);
2797 current_link_up = 1;
2802 /* Want auto-negotiation. */
2803 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
2805 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
2806 if (flowctrl & ADVERTISE_1000XPAUSE)
2807 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
2808 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
2809 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
2811 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2812 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
2813 tp->serdes_counter &&
2814 ((mac_status & (MAC_STATUS_PCS_SYNCED |
2815 MAC_STATUS_RCVD_CFG)) ==
2816 MAC_STATUS_PCS_SYNCED)) {
2817 tp->serdes_counter--;
2818 current_link_up = 1;
2823 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2824 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
2826 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2828 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2829 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2830 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2831 MAC_STATUS_SIGNAL_DET)) {
2832 sg_dig_status = tr32(SG_DIG_STATUS);
2833 mac_status = tr32(MAC_STATUS);
2835 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
2836 (mac_status & MAC_STATUS_PCS_SYNCED)) {
2837 u32 local_adv = 0, remote_adv = 0;
2839 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
2840 local_adv |= ADVERTISE_1000XPAUSE;
2841 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
2842 local_adv |= ADVERTISE_1000XPSE_ASYM;
2844 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
2845 remote_adv |= LPA_1000XPAUSE;
2846 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
2847 remote_adv |= LPA_1000XPAUSE_ASYM;
2849 tg3_setup_flow_control(tp, local_adv, remote_adv);
2850 current_link_up = 1;
2851 tp->serdes_counter = 0;
2852 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2853 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
2854 if (tp->serdes_counter)
2855 tp->serdes_counter--;
2858 u32 val = serdes_cfg;
2865 tw32_f(MAC_SERDES_CFG, val);
2868 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
2871 /* Link parallel detection - link is up */
2872 /* only if we have PCS_SYNC and not */
2873 /* receiving config code words */
2874 mac_status = tr32(MAC_STATUS);
2875 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2876 !(mac_status & MAC_STATUS_RCVD_CFG)) {
2877 tg3_setup_flow_control(tp, 0, 0);
2878 current_link_up = 1;
2880 TG3_FLG2_PARALLEL_DETECT;
2881 tp->serdes_counter =
2882 SERDES_PARALLEL_DET_TIMEOUT;
2884 goto restart_autoneg;
2888 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2889 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2893 return current_link_up;
2896 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2898 int current_link_up = 0;
2900 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
2903 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2904 u32 txflags, rxflags;
2907 if (fiber_autoneg(tp, &txflags, &rxflags)) {
2908 u32 local_adv = 0, remote_adv = 0;
2910 if (txflags & ANEG_CFG_PS1)
2911 local_adv |= ADVERTISE_1000XPAUSE;
2912 if (txflags & ANEG_CFG_PS2)
2913 local_adv |= ADVERTISE_1000XPSE_ASYM;
2915 if (rxflags & MR_LP_ADV_SYM_PAUSE)
2916 remote_adv |= LPA_1000XPAUSE;
2917 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
2918 remote_adv |= LPA_1000XPAUSE_ASYM;
2920 tg3_setup_flow_control(tp, local_adv, remote_adv);
2922 current_link_up = 1;
2924 for (i = 0; i < 30; i++) {
2927 (MAC_STATUS_SYNC_CHANGED |
2928 MAC_STATUS_CFG_CHANGED));
2930 if ((tr32(MAC_STATUS) &
2931 (MAC_STATUS_SYNC_CHANGED |
2932 MAC_STATUS_CFG_CHANGED)) == 0)
2936 mac_status = tr32(MAC_STATUS);
2937 if (current_link_up == 0 &&
2938 (mac_status & MAC_STATUS_PCS_SYNCED) &&
2939 !(mac_status & MAC_STATUS_RCVD_CFG))
2940 current_link_up = 1;
2942 tg3_setup_flow_control(tp, 0, 0);
2944 /* Forcing 1000FD link up. */
2945 current_link_up = 1;
2947 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2950 tw32_f(MAC_MODE, tp->mac_mode);
2955 return current_link_up;
2958 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2961 u16 orig_active_speed;
2962 u8 orig_active_duplex;
2964 int current_link_up;
2967 orig_pause_cfg = tp->link_config.active_flowctrl;
2968 orig_active_speed = tp->link_config.active_speed;
2969 orig_active_duplex = tp->link_config.active_duplex;
2971 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2972 netif_carrier_ok(tp->dev) &&
2973 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2974 mac_status = tr32(MAC_STATUS);
2975 mac_status &= (MAC_STATUS_PCS_SYNCED |
2976 MAC_STATUS_SIGNAL_DET |
2977 MAC_STATUS_CFG_CHANGED |
2978 MAC_STATUS_RCVD_CFG);
2979 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2980 MAC_STATUS_SIGNAL_DET)) {
2981 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2982 MAC_STATUS_CFG_CHANGED));
2987 tw32_f(MAC_TX_AUTO_NEG, 0);
2989 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2990 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2991 tw32_f(MAC_MODE, tp->mac_mode);
2994 if (tp->phy_id == PHY_ID_BCM8002)
2995 tg3_init_bcm8002(tp);
2997 /* Enable link change event even when serdes polling. */
2998 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3001 current_link_up = 0;
3002 mac_status = tr32(MAC_STATUS);
3004 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3005 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3007 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3009 tp->hw_status->status =
3010 (SD_STATUS_UPDATED |
3011 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
3013 for (i = 0; i < 100; i++) {
3014 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3015 MAC_STATUS_CFG_CHANGED));
3017 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3018 MAC_STATUS_CFG_CHANGED |
3019 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
3023 mac_status = tr32(MAC_STATUS);
3024 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3025 current_link_up = 0;
3026 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3027 tp->serdes_counter == 0) {
3028 tw32_f(MAC_MODE, (tp->mac_mode |
3029 MAC_MODE_SEND_CONFIGS));
3031 tw32_f(MAC_MODE, tp->mac_mode);
3035 if (current_link_up == 1) {
3036 tp->link_config.active_speed = SPEED_1000;
3037 tp->link_config.active_duplex = DUPLEX_FULL;
3038 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3039 LED_CTRL_LNKLED_OVERRIDE |
3040 LED_CTRL_1000MBPS_ON));
3042 tp->link_config.active_speed = SPEED_INVALID;
3043 tp->link_config.active_duplex = DUPLEX_INVALID;
3044 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3045 LED_CTRL_LNKLED_OVERRIDE |
3046 LED_CTRL_TRAFFIC_OVERRIDE));
3049 if (current_link_up != netif_carrier_ok(tp->dev)) {
3050 if (current_link_up)
3051 netif_carrier_on(tp->dev);
3053 netif_carrier_off(tp->dev);
3054 tg3_link_report(tp);
3056 u32 now_pause_cfg = tp->link_config.active_flowctrl;
3057 if (orig_pause_cfg != now_pause_cfg ||
3058 orig_active_speed != tp->link_config.active_speed ||
3059 orig_active_duplex != tp->link_config.active_duplex)
3060 tg3_link_report(tp);
3066 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3068 int current_link_up, err = 0;
3072 u32 local_adv, remote_adv;
3074 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3075 tw32_f(MAC_MODE, tp->mac_mode);
3081 (MAC_STATUS_SYNC_CHANGED |
3082 MAC_STATUS_CFG_CHANGED |
3083 MAC_STATUS_MI_COMPLETION |
3084 MAC_STATUS_LNKSTATE_CHANGED));
3090 current_link_up = 0;
3091 current_speed = SPEED_INVALID;
3092 current_duplex = DUPLEX_INVALID;
3094 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3095 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3096 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3097 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3098 bmsr |= BMSR_LSTATUS;
3100 bmsr &= ~BMSR_LSTATUS;
3103 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3105 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
3106 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3107 tp->link_config.flowctrl == tp->link_config.active_flowctrl) {
3108 /* do nothing, just check for link up at the end */
3109 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3112 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3113 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3114 ADVERTISE_1000XPAUSE |
3115 ADVERTISE_1000XPSE_ASYM |
3118 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3120 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3121 new_adv |= ADVERTISE_1000XHALF;
3122 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3123 new_adv |= ADVERTISE_1000XFULL;
3125 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3126 tg3_writephy(tp, MII_ADVERTISE, new_adv);
3127 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3128 tg3_writephy(tp, MII_BMCR, bmcr);
3130 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3131 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
3132 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3139 bmcr &= ~BMCR_SPEED1000;
3140 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3142 if (tp->link_config.duplex == DUPLEX_FULL)
3143 new_bmcr |= BMCR_FULLDPLX;
3145 if (new_bmcr != bmcr) {
3146 /* BMCR_SPEED1000 is a reserved bit that needs
3147 * to be set on write.
3149 new_bmcr |= BMCR_SPEED1000;
3151 /* Force a linkdown */
3152 if (netif_carrier_ok(tp->dev)) {
3155 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3156 adv &= ~(ADVERTISE_1000XFULL |
3157 ADVERTISE_1000XHALF |
3159 tg3_writephy(tp, MII_ADVERTISE, adv);
3160 tg3_writephy(tp, MII_BMCR, bmcr |
3164 netif_carrier_off(tp->dev);
3166 tg3_writephy(tp, MII_BMCR, new_bmcr);
3168 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3169 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3170 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3172 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3173 bmsr |= BMSR_LSTATUS;
3175 bmsr &= ~BMSR_LSTATUS;
3177 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3181 if (bmsr & BMSR_LSTATUS) {
3182 current_speed = SPEED_1000;
3183 current_link_up = 1;
3184 if (bmcr & BMCR_FULLDPLX)
3185 current_duplex = DUPLEX_FULL;
3187 current_duplex = DUPLEX_HALF;
3192 if (bmcr & BMCR_ANENABLE) {
3195 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3196 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3197 common = local_adv & remote_adv;
3198 if (common & (ADVERTISE_1000XHALF |
3199 ADVERTISE_1000XFULL)) {
3200 if (common & ADVERTISE_1000XFULL)
3201 current_duplex = DUPLEX_FULL;
3203 current_duplex = DUPLEX_HALF;
3206 current_link_up = 0;
3210 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
3211 tg3_setup_flow_control(tp, local_adv, remote_adv);
3213 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3214 if (tp->link_config.active_duplex == DUPLEX_HALF)
3215 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3217 tw32_f(MAC_MODE, tp->mac_mode);
3220 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3222 tp->link_config.active_speed = current_speed;
3223 tp->link_config.active_duplex = current_duplex;
3225 if (current_link_up != netif_carrier_ok(tp->dev)) {
3226 if (current_link_up)
3227 netif_carrier_on(tp->dev);
3229 netif_carrier_off(tp->dev);
3230 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3232 tg3_link_report(tp);
3237 static void tg3_serdes_parallel_detect(struct tg3 *tp)
3239 if (tp->serdes_counter) {
3240 /* Give autoneg time to complete. */
3241 tp->serdes_counter--;
3244 if (!netif_carrier_ok(tp->dev) &&
3245 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3248 tg3_readphy(tp, MII_BMCR, &bmcr);
3249 if (bmcr & BMCR_ANENABLE) {
3252 /* Select shadow register 0x1f */
3253 tg3_writephy(tp, 0x1c, 0x7c00);
3254 tg3_readphy(tp, 0x1c, &phy1);
3256 /* Select expansion interrupt status register */
3257 tg3_writephy(tp, 0x17, 0x0f01);
3258 tg3_readphy(tp, 0x15, &phy2);
3259 tg3_readphy(tp, 0x15, &phy2);
3261 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3262 /* We have signal detect and not receiving
3263 * config code words, link is up by parallel
3267 bmcr &= ~BMCR_ANENABLE;
3268 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3269 tg3_writephy(tp, MII_BMCR, bmcr);
3270 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3274 else if (netif_carrier_ok(tp->dev) &&
3275 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3276 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3279 /* Select expansion interrupt status register */
3280 tg3_writephy(tp, 0x17, 0x0f01);
3281 tg3_readphy(tp, 0x15, &phy2);
3285 /* Config code words received, turn on autoneg. */
3286 tg3_readphy(tp, MII_BMCR, &bmcr);
3287 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3289 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3295 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3299 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3300 err = tg3_setup_fiber_phy(tp, force_reset);
3301 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3302 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3304 err = tg3_setup_copper_phy(tp, force_reset);
3307 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
3308 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
3311 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3312 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3314 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3319 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3320 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3321 tw32(GRC_MISC_CFG, val);
3324 if (tp->link_config.active_speed == SPEED_1000 &&
3325 tp->link_config.active_duplex == DUPLEX_HALF)
3326 tw32(MAC_TX_LENGTHS,
3327 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3328 (6 << TX_LENGTHS_IPG_SHIFT) |
3329 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3331 tw32(MAC_TX_LENGTHS,
3332 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3333 (6 << TX_LENGTHS_IPG_SHIFT) |
3334 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3336 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3337 if (netif_carrier_ok(tp->dev)) {
3338 tw32(HOSTCC_STAT_COAL_TICKS,
3339 tp->coal.stats_block_coalesce_usecs);
3341 tw32(HOSTCC_STAT_COAL_TICKS, 0);
3345 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3346 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3347 if (!netif_carrier_ok(tp->dev))
3348 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3351 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3352 tw32(PCIE_PWR_MGMT_THRESH, val);
3358 /* This is called whenever we suspect that the system chipset is re-
3359 * ordering the sequence of MMIO to the tx send mailbox. The symptom
3360 * is bogus tx completions. We try to recover by setting the
3361 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3364 static void tg3_tx_recover(struct tg3 *tp)
3366 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3367 tp->write32_tx_mbox == tg3_write_indirect_mbox);
3369 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3370 "mapped I/O cycles to the network device, attempting to "
3371 "recover. Please report the problem to the driver maintainer "
3372 "and include system chipset information.\n", tp->dev->name);
3374 spin_lock(&tp->lock);
3375 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
3376 spin_unlock(&tp->lock);
3379 static inline u32 tg3_tx_avail(struct tg3 *tp)
3382 return (tp->tx_pending -
3383 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3386 /* Tigon3 never reports partial packet sends. So we do not
3387 * need special logic to handle SKBs that have not had all
3388 * of their frags sent yet, like SunGEM does.
3390 static void tg3_tx(struct tg3 *tp)
3392 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3393 u32 sw_idx = tp->tx_cons;
3395 while (sw_idx != hw_idx) {
3396 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3397 struct sk_buff *skb = ri->skb;
3400 if (unlikely(skb == NULL)) {
3405 pci_unmap_single(tp->pdev,
3406 pci_unmap_addr(ri, mapping),
3412 sw_idx = NEXT_TX(sw_idx);
3414 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3415 ri = &tp->tx_buffers[sw_idx];
3416 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3419 pci_unmap_page(tp->pdev,
3420 pci_unmap_addr(ri, mapping),
3421 skb_shinfo(skb)->frags[i].size,
3424 sw_idx = NEXT_TX(sw_idx);
3429 if (unlikely(tx_bug)) {
3435 tp->tx_cons = sw_idx;
3437 /* Need to make the tx_cons update visible to tg3_start_xmit()
3438 * before checking for netif_queue_stopped(). Without the
3439 * memory barrier, there is a small possibility that tg3_start_xmit()
3440 * will miss it and cause the queue to be stopped forever.
3444 if (unlikely(netif_queue_stopped(tp->dev) &&
3445 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
3446 netif_tx_lock(tp->dev);
3447 if (netif_queue_stopped(tp->dev) &&
3448 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
3449 netif_wake_queue(tp->dev);
3450 netif_tx_unlock(tp->dev);
3454 /* Returns size of skb allocated or < 0 on error.
3456 * We only need to fill in the address because the other members
3457 * of the RX descriptor are invariant, see tg3_init_rings.
3459 * Note the purposeful assymetry of cpu vs. chip accesses. For
3460 * posting buffers we only dirty the first cache line of the RX
3461 * descriptor (containing the address). Whereas for the RX status
3462 * buffers the cpu only reads the last cacheline of the RX descriptor
3463 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3465 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3466 int src_idx, u32 dest_idx_unmasked)
3468 struct tg3_rx_buffer_desc *desc;
3469 struct ring_info *map, *src_map;
3470 struct sk_buff *skb;
3472 int skb_size, dest_idx;
3475 switch (opaque_key) {
3476 case RXD_OPAQUE_RING_STD:
3477 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3478 desc = &tp->rx_std[dest_idx];
3479 map = &tp->rx_std_buffers[dest_idx];
3481 src_map = &tp->rx_std_buffers[src_idx];
3482 skb_size = tp->rx_pkt_buf_sz;
3485 case RXD_OPAQUE_RING_JUMBO:
3486 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3487 desc = &tp->rx_jumbo[dest_idx];
3488 map = &tp->rx_jumbo_buffers[dest_idx];
3490 src_map = &tp->rx_jumbo_buffers[src_idx];
3491 skb_size = RX_JUMBO_PKT_BUF_SZ;
3498 /* Do not overwrite any of the map or rp information
3499 * until we are sure we can commit to a new buffer.
3501 * Callers depend upon this behavior and assume that
3502 * we leave everything unchanged if we fail.
3504 skb = netdev_alloc_skb(tp->dev, skb_size);
3508 skb_reserve(skb, tp->rx_offset);
3510 mapping = pci_map_single(tp->pdev, skb->data,
3511 skb_size - tp->rx_offset,
3512 PCI_DMA_FROMDEVICE);
3515 pci_unmap_addr_set(map, mapping, mapping);
3517 if (src_map != NULL)
3518 src_map->skb = NULL;
3520 desc->addr_hi = ((u64)mapping >> 32);
3521 desc->addr_lo = ((u64)mapping & 0xffffffff);
3526 /* We only need to move over in the address because the other
3527 * members of the RX descriptor are invariant. See notes above
3528 * tg3_alloc_rx_skb for full details.
3530 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3531 int src_idx, u32 dest_idx_unmasked)
3533 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3534 struct ring_info *src_map, *dest_map;
3537 switch (opaque_key) {
3538 case RXD_OPAQUE_RING_STD:
3539 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3540 dest_desc = &tp->rx_std[dest_idx];
3541 dest_map = &tp->rx_std_buffers[dest_idx];
3542 src_desc = &tp->rx_std[src_idx];
3543 src_map = &tp->rx_std_buffers[src_idx];
3546 case RXD_OPAQUE_RING_JUMBO:
3547 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3548 dest_desc = &tp->rx_jumbo[dest_idx];
3549 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3550 src_desc = &tp->rx_jumbo[src_idx];
3551 src_map = &tp->rx_jumbo_buffers[src_idx];
3558 dest_map->skb = src_map->skb;
3559 pci_unmap_addr_set(dest_map, mapping,
3560 pci_unmap_addr(src_map, mapping));
3561 dest_desc->addr_hi = src_desc->addr_hi;
3562 dest_desc->addr_lo = src_desc->addr_lo;
3564 src_map->skb = NULL;
3567 #if TG3_VLAN_TAG_USED
3568 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3570 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3574 /* The RX ring scheme is composed of multiple rings which post fresh
3575 * buffers to the chip, and one special ring the chip uses to report
3576 * status back to the host.
3578 * The special ring reports the status of received packets to the
3579 * host. The chip does not write into the original descriptor the
3580 * RX buffer was obtained from. The chip simply takes the original
3581 * descriptor as provided by the host, updates the status and length
3582 * field, then writes this into the next status ring entry.
3584 * Each ring the host uses to post buffers to the chip is described
3585 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
3586 * it is first placed into the on-chip ram. When the packet's length
3587 * is known, it walks down the TG3_BDINFO entries to select the ring.
3588 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3589 * which is within the range of the new packet's length is chosen.
3591 * The "separate ring for rx status" scheme may sound queer, but it makes
3592 * sense from a cache coherency perspective. If only the host writes
3593 * to the buffer post rings, and only the chip writes to the rx status
3594 * rings, then cache lines never move beyond shared-modified state.
3595 * If both the host and chip were to write into the same ring, cache line
3596 * eviction could occur since both entities want it in an exclusive state.
3598 static int tg3_rx(struct tg3 *tp, int budget)
3600 u32 work_mask, rx_std_posted = 0;
3601 u32 sw_idx = tp->rx_rcb_ptr;
3605 hw_idx = tp->hw_status->idx[0].rx_producer;
3607 * We need to order the read of hw_idx and the read of
3608 * the opaque cookie.
3613 while (sw_idx != hw_idx && budget > 0) {
3614 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3616 struct sk_buff *skb;
3617 dma_addr_t dma_addr;
3618 u32 opaque_key, desc_idx, *post_ptr;
3620 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3621 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3622 if (opaque_key == RXD_OPAQUE_RING_STD) {
3623 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3625 skb = tp->rx_std_buffers[desc_idx].skb;
3626 post_ptr = &tp->rx_std_ptr;
3628 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3629 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3631 skb = tp->rx_jumbo_buffers[desc_idx].skb;
3632 post_ptr = &tp->rx_jumbo_ptr;
3635 goto next_pkt_nopost;
3638 work_mask |= opaque_key;
3640 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3641 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3643 tg3_recycle_rx(tp, opaque_key,
3644 desc_idx, *post_ptr);
3646 /* Other statistics kept track of by card. */
3647 tp->net_stats.rx_dropped++;
3651 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3653 if (len > RX_COPY_THRESHOLD
3654 && tp->rx_offset == 2
3655 /* rx_offset != 2 iff this is a 5701 card running
3656 * in PCI-X mode [see tg3_get_invariants()] */
3660 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3661 desc_idx, *post_ptr);
3665 pci_unmap_single(tp->pdev, dma_addr,
3666 skb_size - tp->rx_offset,
3667 PCI_DMA_FROMDEVICE);
3671 struct sk_buff *copy_skb;
3673 tg3_recycle_rx(tp, opaque_key,
3674 desc_idx, *post_ptr);
3676 copy_skb = netdev_alloc_skb(tp->dev, len + 2);
3677 if (copy_skb == NULL)
3678 goto drop_it_no_recycle;
3680 skb_reserve(copy_skb, 2);
3681 skb_put(copy_skb, len);
3682 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3683 skb_copy_from_linear_data(skb, copy_skb->data, len);
3684 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3686 /* We'll reuse the original ring buffer. */
3690 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3691 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3692 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3693 >> RXD_TCPCSUM_SHIFT) == 0xffff))
3694 skb->ip_summed = CHECKSUM_UNNECESSARY;
3696 skb->ip_summed = CHECKSUM_NONE;
3698 skb->protocol = eth_type_trans(skb, tp->dev);
3699 #if TG3_VLAN_TAG_USED
3700 if (tp->vlgrp != NULL &&
3701 desc->type_flags & RXD_FLAG_VLAN) {
3702 tg3_vlan_rx(tp, skb,
3703 desc->err_vlan & RXD_VLAN_MASK);
3706 netif_receive_skb(skb);
3708 tp->dev->last_rx = jiffies;
3715 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
3716 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
3718 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
3719 TG3_64BIT_REG_LOW, idx);
3720 work_mask &= ~RXD_OPAQUE_RING_STD;
3725 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
3727 /* Refresh hw_idx to see if there is new work */
3728 if (sw_idx == hw_idx) {
3729 hw_idx = tp->hw_status->idx[0].rx_producer;
3734 /* ACK the status ring. */
3735 tp->rx_rcb_ptr = sw_idx;
3736 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3738 /* Refill RX ring(s). */
3739 if (work_mask & RXD_OPAQUE_RING_STD) {
3740 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3741 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3744 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3745 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3746 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3754 static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
3756 struct tg3_hw_status *sblk = tp->hw_status;
3758 /* handle link change and other phy events */
3759 if (!(tp->tg3_flags &
3760 (TG3_FLAG_USE_LINKCHG_REG |
3761 TG3_FLAG_POLL_SERDES))) {
3762 if (sblk->status & SD_STATUS_LINK_CHG) {
3763 sblk->status = SD_STATUS_UPDATED |
3764 (sblk->status & ~SD_STATUS_LINK_CHG);
3765 spin_lock(&tp->lock);
3766 tg3_setup_phy(tp, 0);
3767 spin_unlock(&tp->lock);
3771 /* run TX completion thread */
3772 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3774 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
3778 /* run RX thread, within the bounds set by NAPI.
3779 * All RX "locking" is done by ensuring outside
3780 * code synchronizes with tg3->napi.poll()
3782 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
3783 work_done += tg3_rx(tp, budget - work_done);
3788 static int tg3_poll(struct napi_struct *napi, int budget)
3790 struct tg3 *tp = container_of(napi, struct tg3, napi);
3792 struct tg3_hw_status *sblk = tp->hw_status;
3795 work_done = tg3_poll_work(tp, work_done, budget);
3797 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
3800 if (unlikely(work_done >= budget))
3803 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3804 /* tp->last_tag is used in tg3_restart_ints() below
3805 * to tell the hw how much work has been processed,
3806 * so we must read it before checking for more work.
3808 tp->last_tag = sblk->status_tag;
3811 sblk->status &= ~SD_STATUS_UPDATED;
3813 if (likely(!tg3_has_work(tp))) {
3814 netif_rx_complete(tp->dev, napi);
3815 tg3_restart_ints(tp);
3823 /* work_done is guaranteed to be less than budget. */
3824 netif_rx_complete(tp->dev, napi);
3825 schedule_work(&tp->reset_task);
3829 static void tg3_irq_quiesce(struct tg3 *tp)
3831 BUG_ON(tp->irq_sync);
3836 synchronize_irq(tp->pdev->irq);
3839 static inline int tg3_irq_sync(struct tg3 *tp)
3841 return tp->irq_sync;
3844 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3845  * If irq_sync is non-zero, then the IRQ handler must be synchronized
3846  * with as well.  Most of the time, this is not necessary except when
3847  * shutting down the device.
3849 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3851 	spin_lock_bh(&tp->lock);
/* Only quiesce the IRQ when the caller asked for it (irq_sync non-zero). */
3853 		tg3_irq_quiesce(tp);
/* Release the full device lock taken by tg3_full_lock(). */
3856 static inline void tg3_full_unlock(struct tg3 *tp)
3858 	spin_unlock_bh(&tp->lock);
3861 /* One-shot MSI handler - Chip automatically disables interrupt
3862  * after sending MSI so driver doesn't have to do it.
3864 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
3866 	struct net_device *dev = dev_id;
3867 	struct tg3 *tp = netdev_priv(dev);
/* Warm the cache lines the poll routine will touch first. */
3869 	prefetch(tp->hw_status);
3870 	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
/* Skip scheduling NAPI while the IRQ path is quiesced (tg3_irq_sync). */
3872 	if (likely(!tg3_irq_sync(tp)))
3873 		netif_rx_schedule(dev, &tp->napi);
3878 /* MSI ISR - No need to check for interrupt sharing and no need to
3879  * flush status block and interrupt mailbox. PCI ordering rules
3880  * guarantee that MSI will arrive after the status block.
3882 static irqreturn_t tg3_msi(int irq, void *dev_id)
3884 	struct net_device *dev = dev_id;
3885 	struct tg3 *tp = netdev_priv(dev);
3887 	prefetch(tp->hw_status);
3888 	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3890 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
3891 	 * chip-internal interrupt pending events.
3892 	 * Writing non-zero to intr-mbox-0 additional tells the
3893 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
/* Ack/mask further interrupts, then hand work to NAPI unless quiesced. */
3896 	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3897 	if (likely(!tg3_irq_sync(tp)))
3898 		netif_rx_schedule(dev, &tp->napi);
/* MSI is never shared, so this interrupt is always ours. */
3900 	return IRQ_RETVAL(1);
/* Legacy INTx interrupt handler (untagged status mode). Confirms the
 * interrupt is ours via the status block / PCI state register, acks it
 * through interrupt mailbox 0, and schedules NAPI when work is pending.
 * NOTE(review): elided excerpt — several interior lines are missing.
 */
3903 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
3905 	struct net_device *dev = dev_id;
3906 	struct tg3 *tp = netdev_priv(dev);
3907 	struct tg3_hw_status *sblk = tp->hw_status;
3908 	unsigned int handled = 1;
3910 	/* In INTx mode, it is possible for the interrupt to arrive at
3911 	 * the CPU before the status block posted prior to the interrupt.
3912 	 * Reading the PCI State register will confirm whether the
3913 	 * interrupt is ours and will flush the status block.
3915 	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
3916 		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
3917 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3924 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
3925 	 * chip-internal interrupt pending events.
3926 	 * Writing non-zero to intr-mbox-0 additional tells the
3927 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
3930 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
3931 	 * spurious interrupts.  The flush impacts performance but
3932 	 * excessive spurious interrupts can be worse in some cases.
3934 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3935 	if (tg3_irq_sync(tp))
3937 	sblk->status &= ~SD_STATUS_UPDATED;
3938 	if (likely(tg3_has_work(tp))) {
3939 		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3940 		netif_rx_schedule(dev, &tp->napi);
3942 		/* No work, shared interrupt perhaps?  re-enable
3943 		 * interrupts, and flush that PCI write
3945 		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3949 	return IRQ_RETVAL(handled);
/* Legacy INTx interrupt handler for tagged-status mode: uses status_tag vs
 * tp->last_tag (instead of SD_STATUS_UPDATED) to detect new work, and only
 * updates last_tag under netif_rx_schedule_prep() to avoid racing tg3_poll()
 * on a shared IRQ line.
 * NOTE(review): elided excerpt — several interior lines are missing.
 */
3952 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
3954 	struct net_device *dev = dev_id;
3955 	struct tg3 *tp = netdev_priv(dev);
3956 	struct tg3_hw_status *sblk = tp->hw_status;
3957 	unsigned int handled = 1;
3959 	/* In INTx mode, it is possible for the interrupt to arrive at
3960 	 * the CPU before the status block posted prior to the interrupt.
3961 	 * Reading the PCI State register will confirm whether the
3962 	 * interrupt is ours and will flush the status block.
3964 	if (unlikely(sblk->status_tag == tp->last_tag)) {
3965 		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
3966 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3973 	 * writing any value to intr-mbox-0 clears PCI INTA# and
3974 	 * chip-internal interrupt pending events.
3975 	 * writing non-zero to intr-mbox-0 additional tells the
3976 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
3979 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
3980 	 * spurious interrupts.  The flush impacts performance but
3981 	 * excessive spurious interrupts can be worse in some cases.
3983 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3984 	if (tg3_irq_sync(tp))
3986 	if (netif_rx_schedule_prep(dev, &tp->napi)) {
3987 		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3988 		/* Update last_tag to mark that this status has been
3989 		 * seen. Because interrupt may be shared, we may be
3990 		 * racing with tg3_poll(), so only update last_tag
3991 		 * if tg3_poll() is not scheduled.
3993 		tp->last_tag = sblk->status_tag;
3994 		__netif_rx_schedule(dev, &tp->napi);
3997 	return IRQ_RETVAL(handled);
4000 /* ISR for interrupt test */
4001 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
4003 	struct net_device *dev = dev_id;
4004 	struct tg3 *tp = netdev_priv(dev);
4005 	struct tg3_hw_status *sblk = tp->hw_status;
/* Claim the interrupt if the status block posted or INTA# is asserted;
 * disable further interrupts so the self-test sees exactly one event. */
4007 	if ((sblk->status & SD_STATUS_UPDATED) ||
4008 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4009 		tg3_disable_ints(tp);
4010 		return IRQ_RETVAL(1);
4012 	return IRQ_RETVAL(0);
4015 /* Forward declarations: tg3_restart_hw() below needs both. */
4015 static int tg3_init_hw(struct tg3 *, int);
4016 static int tg3_halt(struct tg3 *, int, int);
4018 /* Restart hardware after configuration changes, self-test, etc.
4019  * Invoked with tp->lock held.
4021 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
/* Sparse annotations: on the failure path this drops and re-takes tp->lock. */
4022 	__releases(tp->lock)
4023 	__acquires(tp->lock)
4027 	err = tg3_init_hw(tp, reset_phy);
/* Init failed: shut the chip down, tear down the timer, re-enable NAPI,
 * then re-take the full lock before returning to the caller.
 * NOTE(review): elided excerpt — the error-branch braces are missing here. */
4029 		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4030 		       "aborting.\n", tp->dev->name);
4031 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4032 		tg3_full_unlock(tp);
4033 		del_timer_sync(&tp->timer);
4035 		napi_enable(&tp->napi);
4037 		tg3_full_lock(tp, 0);
/* netpoll hook: invoke the INTx handler directly so netconsole/kgdb can
 * drive the device without a real interrupt. */
4042 #ifdef CONFIG_NET_POLL_CONTROLLER
4043 static void tg3_poll_controller(struct net_device *dev)
4045 	struct tg3 *tp = netdev_priv(dev);
4047 	tg3_interrupt(tp->pdev->irq, dev);
/* Deferred reset worker (scheduled via tp->reset_task): under the full lock,
 * halts and re-initializes the hardware, undoing the TX-recovery mailbox
 * write overrides first, then restarts the netif layer and optionally the
 * timer. NOTE(review): elided excerpt — interior lines are missing.
 */
4051 static void tg3_reset_task(struct work_struct *work)
4053 	struct tg3 *tp = container_of(work, struct tg3, reset_task);
4054 	unsigned int restart_timer;
4056 	tg3_full_lock(tp, 0);
/* Device already down: nothing to reset. */
4058 	if (!netif_running(tp->dev)) {
4059 		tg3_full_unlock(tp);
4063 	tg3_full_unlock(tp);
/* Re-take the lock with IRQ synchronization for the actual reset. */
4067 	tg3_full_lock(tp, 1);
4069 	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
4070 	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
/* TX recovery pending: restore the normal mailbox write methods that the
 * recovery path had replaced, and clear the pending flag. */
4072 	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
4073 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
4074 		tp->write32_rx_mbox = tg3_write_flush_reg32;
4075 		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
4076 		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
4079 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
4080 	if (tg3_init_hw(tp, 1))
4083 	tg3_netif_start(tp);
4086 		mod_timer(&tp->timer, jiffies + 1);
4089 	tg3_full_unlock(tp);
/* Dump a minimal set of MAC/DMA status registers to the kernel log for
 * TX-timeout debugging. */
4092 static void tg3_dump_short_state(struct tg3 *tp)
4094 	printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
4095 	       tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
4096 	printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
4097 	       tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
/* net_device tx_timeout hook: log (when TX-error messages are enabled),
 * dump short state, and defer the actual recovery to the reset task. */
4100 static void tg3_tx_timeout(struct net_device *dev)
4102 	struct tg3 *tp = netdev_priv(dev);
4104 	if (netif_msg_tx_err(tp)) {
4105 		printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
4107 		tg3_dump_short_state(tp);
/* Cannot reset from this context — hand off to process context. */
4110 	schedule_work(&tp->reset_task);
4113 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4114 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
/* Only the low 32 bits matter: a crossing shows up as 32-bit wraparound. */
4116 	u32 base = (u32) mapping & 0xffffffff;
4118 	return ((base > 0xffffdcc0) &&
4119 		(base + len + 8 < base));
4122 /* Test for DMA addresses > 40-bit */
4123 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
/* Only relevant on 64-bit highmem systems with the 40-bit DMA bug flag set;
 * otherwise the compiled-out path (elided here) presumably returns 0 —
 * TODO confirm against the full source. */
4126 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4127 	if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
4128 		return (((u64) mapping + len) > DMA_40BIT_MASK);
4135 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4137 /* Workaround 4GB and 40-bit hardware DMA bugs. */
4138 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
4139 				       u32 last_plus_one, u32 *start,
4140 				       u32 base_flags, u32 mss)
4142 	struct sk_buff *new_skb;
4143 	dma_addr_t new_addr = 0;
/* Linearize the skb into a freshly allocated one; 5701 additionally needs
 * extra headroom so data lands 4-byte aligned. */
4147 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
4148 		new_skb = skb_copy(skb, GFP_ATOMIC);
4150 		int more_headroom = 4 - ((unsigned long)skb->data & 3);
4152 		new_skb = skb_copy_expand(skb,
4153 					  skb_headroom(skb) + more_headroom,
4154 					  skb_tailroom(skb), GFP_ATOMIC);
4160 		/* New SKB is guaranteed to be linear. */
4162 		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
4164 		/* Make sure new skb does not cross any 4G boundaries.
4165 		 * Drop the packet if it does.
4167 		if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
4169 			dev_kfree_skb(new_skb);
4172 			tg3_set_txd(tp, entry, new_addr, new_skb->len,
4173 				    base_flags, 1 | (mss << 1));
4174 			*start = NEXT_TX(entry);
4178 	/* Now clean up the sw ring entries. */
/* Unmap the original fragments: entry 0 is the linear head (skb_headlen),
 * entries 1..n map to frags[i-1]. The first slot takes ownership of new_skb.
 * NOTE(review): elided excerpt — loop setup/teardown lines are missing. */
4180 	while (entry != last_plus_one) {
4184 			len = skb_headlen(skb);
4186 			len = skb_shinfo(skb)->frags[i-1].size;
4187 		pci_unmap_single(tp->pdev,
4188 				 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
4189 				 len, PCI_DMA_TODEVICE);
4191 			tp->tx_buffers[entry].skb = new_skb;
4192 			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
4194 			tp->tx_buffers[entry].skb = NULL;
4196 		entry = NEXT_TX(entry);
/* Fill one TX descriptor: 64-bit DMA address split into hi/lo words, length
 * and flags packed together, and VLAN tag / MSS packed into vlan_tag.
 * mss_and_is_end multiplexes the MSS (bits 31:1) with the is-end bit (bit 0).
 */
4205 static void tg3_set_txd(struct tg3 *tp, int entry,
4206 			dma_addr_t mapping, int len, u32 flags,
4209 	struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4210 	int is_end = (mss_and_is_end & 0x1);
4211 	u32 mss = (mss_and_is_end >> 1);
4215 		flags |= TXD_FLAG_END;
4216 	if (flags & TXD_FLAG_VLAN) {
/* VLAN tag travels in the upper 16 bits of the flags argument. */
4217 		vlan_tag = flags >> 16;
4220 	vlan_tag |= (mss << TXD_MSS_SHIFT);
4222 	txd->addr_hi = ((u64) mapping >> 32);
4223 	txd->addr_lo = ((u64) mapping & 0xffffffff);
4224 	txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4225 	txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4228 /* hard_start_xmit for devices that don't have any bugs and
4229  * support TG3_FLG2_HW_TSO_2 only.
4231 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4233 	struct tg3 *tp = netdev_priv(dev);
4235 	u32 len, entry, base_flags, mss;
4237 	len = skb_headlen(skb);
4239 	/* We are running in BH disabled context with netif_tx_lock
4240 	 * and TX reclaim runs via tp->napi.poll inside of a software
4241 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
4242 	 * no IRQ context deadlocks to worry about either.  Rejoice!
/* Ring full while the queue was still awake is a driver bug — stop and log. */
4244 	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4245 		if (!netif_queue_stopped(dev)) {
4246 			netif_stop_queue(dev);
4248 			/* This is a hard error, log it. */
4249 			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4250 			       "queue awake!\n", dev->name);
4252 		return NETDEV_TX_BUSY;
4255 	entry = tp->tx_prod;
/* TSO setup: hardware needs the pseudo-header prepared and MSS in the
 * descriptor. NOTE(review): elided excerpt — some branch lines missing. */
4258 	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4259 		int tcp_opt_len, ip_tcp_len;
4261 		if (skb_header_cloned(skb) &&
4262 		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4267 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4268 			mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4270 			struct iphdr *iph = ip_hdr(skb);
4272 			tcp_opt_len = tcp_optlen(skb);
4273 			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4276 			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4277 			mss |= (ip_tcp_len + tcp_opt_len) << 9;
4280 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4281 			       TXD_FLAG_CPU_POST_DMA);
4283 		tcp_hdr(skb)->check = 0;
4286 	else if (skb->ip_summed == CHECKSUM_PARTIAL)
4287 		base_flags |= TXD_FLAG_TCPUDP_CSUM;
4288 #if TG3_VLAN_TAG_USED
4289 	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4290 		base_flags |= (TXD_FLAG_VLAN |
4291 			       (vlan_tx_tag_get(skb) << 16));
4294 	/* Queue skb data, a.k.a. the main skb fragment. */
4295 	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4297 	tp->tx_buffers[entry].skb = skb;
4298 	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4300 	tg3_set_txd(tp, entry, mapping, len, base_flags,
4301 		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4303 	entry = NEXT_TX(entry);
4305 	/* Now loop through additional data fragments, and queue them. */
4306 	if (skb_shinfo(skb)->nr_frags > 0) {
4307 		unsigned int i, last;
4309 		last = skb_shinfo(skb)->nr_frags - 1;
4310 		for (i = 0; i <= last; i++) {
4311 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4314 			mapping = pci_map_page(tp->pdev,
4317 					       len, PCI_DMA_TODEVICE);
4319 			tp->tx_buffers[entry].skb = NULL;
4320 			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4322 			tg3_set_txd(tp, entry, mapping, len,
4323 				    base_flags, (i == last) | (mss << 1));
4325 			entry = NEXT_TX(entry);
4329 	/* Packets are ready, update Tx producer idx local and on card. */
4330 	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4332 	tp->tx_prod = entry;
/* Stop the queue when nearly full; re-wake if reclaim freed enough slots
 * in the meantime (avoids a stop/wake race with the completion path). */
4333 	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4334 		netif_stop_queue(dev);
4335 		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4336 			netif_wake_queue(tp->dev);
4342 	dev->trans_start = jiffies;
4344 	return NETDEV_TX_OK;
4347 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4349 /* Use GSO to workaround a rare TSO bug that may be triggered when the
4350  * TSO header is greater than 80 bytes.
4352 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4354 	struct sk_buff *segs, *nskb;
4356 	/* Estimate the number of fragments in the worst case */
/* Not enough ring space for the segmented packet: stop, re-check, and
 * report busy so the stack re-queues the skb. */
4357 	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
4358 		netif_stop_queue(tp->dev);
4359 		if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4360 			return NETDEV_TX_BUSY;
4362 		netif_wake_queue(tp->dev);
/* Let the stack segment in software (TSO masked off), then transmit each
 * resulting skb through the bug-aware xmit path. */
4365 	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4367 		goto tg3_tso_bug_end;
4373 		tg3_start_xmit_dma_bug(nskb, tp->dev);
4379 	return NETDEV_TX_OK;
4382 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4383  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4385 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4387 	struct tg3 *tp = netdev_priv(dev);
4389 	u32 len, entry, base_flags, mss;
4390 	int would_hit_hwbug;
4392 	len = skb_headlen(skb);
4394 	/* We are running in BH disabled context with netif_tx_lock
4395 	 * and TX reclaim runs via tp->napi.poll inside of a software
4396 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
4397 	 * no IRQ context deadlocks to worry about either.  Rejoice!
4399 	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4400 		if (!netif_queue_stopped(dev)) {
4401 			netif_stop_queue(dev);
4403 			/* This is a hard error, log it. */
4404 			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4405 			       "queue awake!\n", dev->name);
4407 		return NETDEV_TX_BUSY;
4410 	entry = tp->tx_prod;
4412 	if (skb->ip_summed == CHECKSUM_PARTIAL)
4413 		base_flags |= TXD_FLAG_TCPUDP_CSUM;
/* TSO setup for HW_TSO_1 / firmware-TSO parts; headers > 80 bytes go
 * through the GSO workaround path on affected chips.
 * NOTE(review): elided excerpt — some branch lines are missing below. */
4415 	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4417 		int tcp_opt_len, ip_tcp_len, hdr_len;
4419 		if (skb_header_cloned(skb) &&
4420 		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4425 		tcp_opt_len = tcp_optlen(skb);
4426 		ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4428 		hdr_len = ip_tcp_len + tcp_opt_len;
4429 		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
4430 			     (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
4431 			return (tg3_tso_bug(tp, skb));
4433 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4434 			       TXD_FLAG_CPU_POST_DMA);
4438 		iph->tot_len = htons(mss + hdr_len);
4439 		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
4440 			tcp_hdr(skb)->check = 0;
4441 			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
4443 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
/* IP/TCP option lengths are encoded into mss (HW TSO / 5705) or into
 * base_flags (other chips). */
4448 		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4449 		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
4450 			if (tcp_opt_len || iph->ihl > 5) {
4453 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4454 				mss |= (tsflags << 11);
4457 			if (tcp_opt_len || iph->ihl > 5) {
4460 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4461 				base_flags |= tsflags << 12;
4465 #if TG3_VLAN_TAG_USED
4466 	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4467 		base_flags |= (TXD_FLAG_VLAN |
4468 			       (vlan_tx_tag_get(skb) << 16));
4471 	/* Queue skb data, a.k.a. the main skb fragment. */
4472 	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4474 	tp->tx_buffers[entry].skb = skb;
4475 	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4477 	would_hit_hwbug = 0;
/* Record whether any mapping trips the 5701 DMA bug or crosses a 4G/40-bit
 * boundary; if so, the whole chain is redone via the workaround below. */
4479 	if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
4480 		would_hit_hwbug = 1;
4481 	else if (tg3_4g_overflow_test(mapping, len))
4482 		would_hit_hwbug = 1;
4484 	tg3_set_txd(tp, entry, mapping, len, base_flags,
4485 		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4487 	entry = NEXT_TX(entry);
4489 	/* Now loop through additional data fragments, and queue them. */
4490 	if (skb_shinfo(skb)->nr_frags > 0) {
4491 		unsigned int i, last;
4493 		last = skb_shinfo(skb)->nr_frags - 1;
4494 		for (i = 0; i <= last; i++) {
4495 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4498 			mapping = pci_map_page(tp->pdev,
4501 					       len, PCI_DMA_TODEVICE);
4503 			tp->tx_buffers[entry].skb = NULL;
4504 			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4506 			if (tg3_4g_overflow_test(mapping, len))
4507 				would_hit_hwbug = 1;
4509 			if (tg3_40bit_overflow_test(tp, mapping, len))
4510 				would_hit_hwbug = 1;
4512 			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4513 				tg3_set_txd(tp, entry, mapping, len,
4514 					    base_flags, (i == last)|(mss << 1));
4516 				tg3_set_txd(tp, entry, mapping, len,
4517 					    base_flags, (i == last));
4519 			entry = NEXT_TX(entry);
4523 	if (would_hit_hwbug) {
4524 		u32 last_plus_one = entry;
4527 		start = entry - 1 - skb_shinfo(skb)->nr_frags;
4528 		start &= (TG3_TX_RING_SIZE - 1);
4530 		/* If the workaround fails due to memory/mapping
4531 		 * failure, silently drop this packet.
4533 		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
4534 						&start, base_flags, mss))
4540 	/* Packets are ready, update Tx producer idx local and on card. */
4541 	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4543 	tp->tx_prod = entry;
4544 	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4545 		netif_stop_queue(dev);
4546 		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4547 			netif_wake_queue(tp->dev);
4553 	dev->trans_start = jiffies;
4555 	return NETDEV_TX_OK;
/* Record the MTU-dependent configuration: jumbo frames enable the jumbo RX
 * ring, and on 5780-class chips jumbo MTU also disables TSO capability. */
4558 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4563 	if (new_mtu > ETH_DATA_LEN) {
4564 		if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4565 			tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4566 			ethtool_op_set_tso(dev, 0);
4569 			tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4571 		if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4572 			tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4573 		tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
/* net_device change_mtu hook: validate the new MTU, and if the interface is
 * running, halt + restart the hardware under the full lock to apply it. */
4577 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4579 	struct tg3 *tp = netdev_priv(dev);
4582 	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4585 	if (!netif_running(dev)) {
4586 		/* We'll just catch it later when the
/* Interface down: store the new MTU config; hardware is programmed later. */
4589 		tg3_set_mtu(dev, tp, new_mtu);
4595 	tg3_full_lock(tp, 1);
4597 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4599 	tg3_set_mtu(dev, tp, new_mtu);
4601 	err = tg3_restart_hw(tp, 0);
4604 		tg3_netif_start(tp);
4606 	tg3_full_unlock(tp);
4611 /* Free up pending packets in all rx/tx rings.
4613  * The chip has been shut down and the driver detached from
4614  * the networking, so no interrupts or new tx packets will
4615  * end up in the driver.  tp->{tx,}lock is not held and we are not
4616  * in an interrupt context and thus may sleep.
4618 static void tg3_free_rings(struct tg3 *tp)
4620 	struct ring_info *rxp;
/* Standard RX ring: unmap and free every posted skb. */
4623 	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4624 		rxp = &tp->rx_std_buffers[i];
4626 		if (rxp->skb == NULL)
4628 		pci_unmap_single(tp->pdev,
4629 				 pci_unmap_addr(rxp, mapping),
4630 				 tp->rx_pkt_buf_sz - tp->rx_offset,
4631 				 PCI_DMA_FROMDEVICE);
4632 		dev_kfree_skb_any(rxp->skb);
/* Jumbo RX ring: same treatment with the jumbo buffer size. */
4636 	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4637 		rxp = &tp->rx_jumbo_buffers[i];
4639 		if (rxp->skb == NULL)
4641 		pci_unmap_single(tp->pdev,
4642 				 pci_unmap_addr(rxp, mapping),
4643 				 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4644 				 PCI_DMA_FROMDEVICE);
4645 		dev_kfree_skb_any(rxp->skb);
/* TX ring: first slot of each packet holds the linear head, following
 * slots hold page fragments; unmap each before freeing the skb.
 * NOTE(review): elided excerpt — several loop-body lines are missing. */
4649 	for (i = 0; i < TG3_TX_RING_SIZE; ) {
4650 		struct tx_ring_info *txp;
4651 		struct sk_buff *skb;
4654 		txp = &tp->tx_buffers[i];
4662 		pci_unmap_single(tp->pdev,
4663 				 pci_unmap_addr(txp, mapping),
4670 		for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4671 			txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4672 			pci_unmap_page(tp->pdev,
4673 				       pci_unmap_addr(txp, mapping),
4674 				       skb_shinfo(skb)->frags[j].size,
4679 		dev_kfree_skb_any(skb);
4683 /* Initialize tx/rx rings for packet processing.
4685  * The chip has been shut down and the driver detached from
4686  * the networking, so no interrupts or new tx packets will
4687  * end up in the driver.  tp->{tx,}lock are held and thus
4690 static int tg3_init_rings(struct tg3 *tp)
4694 	/* Free up all the SKBs. */
4697 	/* Zero out all descriptors. */
4698 	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4699 	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4700 	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4701 	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
/* 5780-class chips use the jumbo buffer size in the standard ring for
 * jumbo MTUs rather than a separate jumbo ring. */
4703 	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4704 	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4705 	    (tp->dev->mtu > ETH_DATA_LEN))
4706 		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4708 	/* Initialize invariants of the rings, we only set this
4709 	 * stuff once.  This works because the card does not
4710 	 * write into the rx buffer posting rings.
4712 	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4713 		struct tg3_rx_buffer_desc *rxd;
4715 		rxd = &tp->rx_std[i];
4716 		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4718 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4719 		rxd->opaque = (RXD_OPAQUE_RING_STD |
4720 			       (i << RXD_OPAQUE_INDEX_SHIFT));
4723 	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4724 		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4725 			struct tg3_rx_buffer_desc *rxd;
4727 			rxd = &tp->rx_jumbo[i];
4728 			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4730 			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4732 			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4733 				       (i << RXD_OPAQUE_INDEX_SHIFT));
4737 	/* Now allocate fresh SKBs for each rx ring. */
/* Partial allocation is tolerated: log a warning and run with a smaller
 * ring rather than failing outright. */
4738 	for (i = 0; i < tp->rx_pending; i++) {
4739 		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
4740 			printk(KERN_WARNING PFX
4741 			       "%s: Using a smaller RX standard ring, "
4742 			       "only %d out of %d buffers were allocated "
4744 			       tp->dev->name, i, tp->rx_pending);
4752 	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4753 		for (i = 0; i < tp->rx_jumbo_pending; i++) {
4754 			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4756 				printk(KERN_WARNING PFX
4757 				       "%s: Using a smaller RX jumbo ring, "
4758 				       "only %d out of %d buffers were "
4759 				       "allocated successfully.\n",
4760 				       tp->dev->name, i, tp->rx_jumbo_pending);
4765 				tp->rx_jumbo_pending = i;
4774  * Must not be invoked with interrupt sources disabled and
4775  * the hardware shutdown down.
4777 static void tg3_free_consistent(struct tg3 *tp)
/* The buffer-info arrays were one kzalloc (see tg3_alloc_consistent):
 * freeing rx_std_buffers releases jumbo and tx info too. */
4779 	kfree(tp->rx_std_buffers);
4780 	tp->rx_std_buffers = NULL;
/* Each DMA ring/block is freed and its pointer NULLed so a second call
 * is safe (NULL checks are in the elided lines). */
4782 		pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4783 				    tp->rx_std, tp->rx_std_mapping);
4787 		pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4788 				    tp->rx_jumbo, tp->rx_jumbo_mapping);
4789 		tp->rx_jumbo = NULL;
4792 		pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4793 				    tp->rx_rcb, tp->rx_rcb_mapping);
4797 		pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4798 				    tp->tx_ring, tp->tx_desc_mapping);
4801 	if (tp->hw_status) {
4802 		pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4803 				    tp->hw_status, tp->status_mapping);
4804 		tp->hw_status = NULL;
4807 		pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4808 				    tp->hw_stats, tp->stats_mapping);
4809 		tp->hw_stats = NULL;
4814  * Must not be invoked with interrupt sources disabled and
4815  * the hardware shutdown down.  Can sleep.
4817 static int tg3_alloc_consistent(struct tg3 *tp)
/* One kzalloc covers all three buffer-info arrays (std RX, jumbo RX, TX);
 * the jumbo and tx pointers are carved out of the same allocation below. */
4819 	tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
4821 				       TG3_RX_JUMBO_RING_SIZE)) +
4822 				     (sizeof(struct tx_ring_info) *
4825 	if (!tp->rx_std_buffers)
4828 	tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4829 	tp->tx_buffers = (struct tx_ring_info *)
4830 		&tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
/* DMA-coherent rings and blocks; any failure unwinds via the common
 * cleanup path that calls tg3_free_consistent(). */
4832 	tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4833 					  &tp->rx_std_mapping);
4837 	tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4838 					    &tp->rx_jumbo_mapping);
4843 	tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4844 					  &tp->rx_rcb_mapping);
4848 	tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4849 					   &tp->tx_desc_mapping);
4853 	tp->hw_status = pci_alloc_consistent(tp->pdev,
4855 					     &tp->status_mapping);
4859 	tp->hw_stats = pci_alloc_consistent(tp->pdev,
4860 					    sizeof(struct tg3_hw_stats),
4861 					    &tp->stats_mapping);
4865 	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4866 	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4871 	tg3_free_consistent(tp);
4875 #define MAX_WAIT_CNT 1000
4877 /* To stop a block, clear the enable bit and poll till it
4878  * clears.  tp->lock is held.
4880 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
/* 5705-plus chips cannot toggle some enable bits — treated as success. */
4885 	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4892 			/* We can't enable/disable these bits of the
4893 			 * 5705/5750, just say success.
/* Poll up to MAX_WAIT_CNT times for the enable bit to clear. */
4906 	for (i = 0; i < MAX_WAIT_CNT; i++) {
4909 		if ((val & enable_bit) == 0)
4913 	if (i == MAX_WAIT_CNT && !silent) {
4914 		printk(KERN_ERR PFX "tg3_stop_block timed out, "
4915 		       "ofs=%lx enable_bit=%x\n",
4923 /* tp->lock is held. */
4924 static int tg3_abort_hw(struct tg3 *tp, int silent)
4928 	tg3_disable_ints(tp);
/* Stop the RX MAC first, then each RX/TX engine block in dependency order,
 * OR-accumulating any stop failures into err. */
4930 	tp->rx_mode &= ~RX_MODE_ENABLE;
4931 	tw32_f(MAC_RX_MODE, tp->rx_mode);
4934 	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4935 	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4936 	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4937 	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4938 	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4939 	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4941 	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4942 	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4943 	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4944 	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4945 	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4946 	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4947 	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4949 	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4950 	tw32_f(MAC_MODE, tp->mac_mode);
/* TX MAC shutdown is polled directly on the mode register. */
4953 	tp->tx_mode &= ~TX_MODE_ENABLE;
4954 	tw32_f(MAC_TX_MODE, tp->tx_mode);
4956 	for (i = 0; i < MAX_WAIT_CNT; i++) {
4958 		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4961 	if (i >= MAX_WAIT_CNT) {
4962 		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4963 		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4964 		       tp->dev->name, tr32(MAC_TX_MODE));
4968 	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4969 	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4970 	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
/* Pulse the FTQ reset, stop the memory blocks, then clear the host-visible
 * status and stats blocks. */
4972 	tw32(FTQ_RESET, 0xffffffff);
4973 	tw32(FTQ_RESET, 0x00000000);
4975 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4976 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4979 		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4981 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4986 /* tp->lock is held. */
4987 static int tg3_nvram_lock(struct tg3 *tp)
4989 	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
/* First taker requests the hardware software-arbitration grant and polls
 * up to 8000 iterations; recursive takers just bump the counter. */
4992 		if (tp->nvram_lock_cnt == 0) {
4993 			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4994 			for (i = 0; i < 8000; i++) {
4995 				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
/* Grant never arrived: withdraw the request and fail. */
5000 				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5004 		tp->nvram_lock_cnt++;
5009 /* tp->lock is held. */
/* Counted unlock: only the last unlock releases the hardware arbitration. */
5010 static void tg3_nvram_unlock(struct tg3 *tp)
5012 	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5013 		if (tp->nvram_lock_cnt > 0)
5014 			tp->nvram_lock_cnt--;
5015 		if (tp->nvram_lock_cnt == 0)
5016 			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5020 /* tp->lock is held. */
/* Set the NVRAM access-enable bit on 5750-plus parts without protected NVRAM. */
5021 static void tg3_enable_nvram_access(struct tg3 *tp)
5023 	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5024 	    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5025 		u32 nvaccess = tr32(NVRAM_ACCESS);
5027 		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5031 /* tp->lock is held. */
/* Mirror of tg3_enable_nvram_access(): clear the access-enable bit. */
5032 static void tg3_disable_nvram_access(struct tg3 *tp)
5034 	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5035 	    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5036 		u32 nvaccess = tr32(NVRAM_ACCESS);
5038 		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
/* Post an event to the APE management firmware: verify the APE segment
 * signature and firmware-ready status, wait (<= ~1 ms) for any prior event
 * to drain under the APE memory lock, then write the event and ring APE_EVENT_1.
 * NOTE(review): elided excerpt — delay/brace lines are missing from this view.
 */
5042 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
5047 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
5048 	if (apedata != APE_SEG_SIG_MAGIC)
5051 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
5052 	if (apedata != APE_FW_STATUS_READY)
5055 	/* Wait for up to 1 millisecond for APE to service previous event. */
5056 	for (i = 0; i < 10; i++) {
5057 		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
5060 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
5062 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5063 			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
5064 					event | APE_EVENT_STATUS_EVENT_PENDING);
5066 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
5068 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5074 	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5075 		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
/* Inform the APE firmware of a driver state transition (init / shutdown /
 * suspend): fill the host segment registers on init, map the reset kind to
 * an APE event code, and send it via tg3_ape_send_event(). No-op when the
 * APE is not enabled. */
5078 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5083 	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5087 		case RESET_KIND_INIT:
5088 			tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5089 					APE_HOST_SEG_SIG_MAGIC);
5090 			tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5091 					APE_HOST_SEG_LEN_MAGIC);
5092 			apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5093 			tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5094 			tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5095 					APE_HOST_DRIVER_ID_MAGIC);
5096 			tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5097 					APE_HOST_BEHAV_NO_PHYLOCK);
5099 			event = APE_EVENT_STATUS_STATE_START;
5101 		case RESET_KIND_SHUTDOWN:
5102 			event = APE_EVENT_STATUS_STATE_UNLOAD;
5104 		case RESET_KIND_SUSPEND:
5105 			event = APE_EVENT_STATUS_STATE_SUSPEND;
5111 	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5113 	tg3_ape_send_event(tp, event);
5116 /* tp->lock is held. */
/* Before resetting the chip: write the firmware-mailbox magic, advertise the
 * driver state to ASF firmware (new-handshake chips), and notify the APE on
 * init/suspend transitions. */
5117 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5119 	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5120 		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
5122 	if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5124 		case RESET_KIND_INIT:
5125 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5129 		case RESET_KIND_SHUTDOWN:
5130 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5134 		case RESET_KIND_SUSPEND:
5135 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5144 	if (kind == RESET_KIND_INIT ||
5145 	    kind == RESET_KIND_SUSPEND)
5146 		tg3_ape_driver_state_change(tp, kind);
5149 /* tp->lock is held. */
/* After the chip reset completes: report the *_DONE driver state to ASF
 * firmware (new handshake only) and notify the APE on shutdown. */
5150 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5152 	if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5154 		case RESET_KIND_INIT:
5155 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5156 				      DRV_STATE_START_DONE);
5159 		case RESET_KIND_SHUTDOWN:
5160 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5161 				      DRV_STATE_UNLOAD_DONE);
5169 	if (kind == RESET_KIND_SHUTDOWN)
5170 		tg3_ape_driver_state_change(tp, kind);
5173 /* tp->lock is held. */
/* Legacy ASF signalling: write the driver-state mailbox directly for chips
 * without the new ASF handshake, keyed on the reset kind. */
5174 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5176 	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5178 		case RESET_KIND_INIT:
5179 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5183 		case RESET_KIND_SHUTDOWN:
5184 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5188 		case RESET_KIND_SUSPEND:
5189 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
/* Wait for on-chip firmware to come up after reset: 5906 polls VCPU_STATUS
 * (up to ~20 ms); other chips poll the firmware mailbox for the inverted
 * magic value. Missing firmware (e.g. some Sun onboard parts) is logged once
 * but not treated as an error. */
5199 static int tg3_poll_fw(struct tg3 *tp)
5204 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5205 		/* Wait up to 20ms for init done. */
5206 		for (i = 0; i < 200; i++) {
5207 			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5214 	/* Wait for firmware initialization to complete. */
5215 	for (i = 0; i < 100000; i++) {
5216 		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5217 		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5222 	/* Chip might not be fitted with firmware.  Some Sun onboard
5223 	 * parts are configured like that.  So don't signal the timeout
5224 	 * of the above loop as an error, but do report the lack of
5225 	 * running firmware once.
5228 	    !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5229 		tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5231 		printk(KERN_INFO PFX "%s: No firmware running.\n",
5238 /* Save PCI command register before chip reset */
/* The GRC core-clock reset clears the memory-enable bit in the PCI
 * command register (see comment in tg3_chip_reset()), so stash the
 * command word in tp->pci_cmd for tg3_restore_pci_state() to put back.
 */
5239 static void tg3_save_pci_state(struct tg3 *tp)
5241 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
5244 /* Restore PCI state after chip reset */
/* Rebuild the configuration-space state that the GRC core-clock reset
 * clobbers: indirect-access enables (MISC_HOST_CTRL), the PCISTATE
 * retry/APE-access bits, the saved PCI command word, the PCIe max read
 * request size, cache-line size / latency timer, the PCI-X relaxed
 * ordering bit, and — on 5780-class chips — the MSI enable bit.
 */
5245 static void tg3_restore_pci_state(struct tg3 *tp)
5249 /* Re-enable indirect register accesses. */
5250 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5251 tp->misc_host_ctrl);
5253 /* Set MAX PCI retry to zero. */
5254 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5255 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5256 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5257 val |= PCISTATE_RETRY_SAME_DMA;
5258 /* Allow reads and writes to the APE register and memory space. */
5259 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5260 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5261 PCISTATE_ALLOW_APE_SHMEM_WR;
5262 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
/* Put back the command word saved by tg3_save_pci_state(). */
5264 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
5266 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5267 pcie_set_readrq(tp->pdev, 4096);
5269 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5270 tp->pci_cacheline_sz);
5271 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5275 /* Make sure PCI-X relaxed ordering bit is clear. */
5279 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5281 pcix_cmd &= ~PCI_X_CMD_ERO;
5282 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5286 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5288 /* Chip reset on 5780 will reset MSI enable bit,
5289 * so need to restore it.
5291 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5294 pci_read_config_word(tp->pdev,
5295 tp->msi_cap + PCI_MSI_FLAGS,
5297 pci_write_config_word(tp->pdev,
5298 tp->msi_cap + PCI_MSI_FLAGS,
5299 ctrl | PCI_MSI_FLAGS_ENABLE);
5300 val = tr32(MSGINT_MODE);
5301 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
/* Forward declaration — tg3_stop_fw() is defined after tg3_chip_reset(). */
5306 static void tg3_stop_fw(struct tg3 *);
5308 /* tp->lock is held. */
/* Perform a full GRC core-clock chip reset and bring the device back to
 * a state where register/config access works again.  Sequence: save the
 * PCI config state the reset clobbers, quiesce the irq handler, issue
 * GRC_MISC_CFG_CORECLK_RESET (with chip-specific PCIe/5906 preamble),
 * delay and flush posted writes, restore PCI state, re-enable the memory
 * arbiter, poll for firmware, and finally re-probe the ASF enable state
 * from NVRAM shadow memory.  Returns 0 or a negative error from
 * tg3_poll_fw().  NOTE(review): several interior lines (udelays, some
 * closing braces, error paths) are not visible in this excerpt.
 */
5309 static int tg3_chip_reset(struct tg3 *tp)
5312 void (*write_op)(struct tg3 *, u32, u32);
5317 /* No matching tg3_nvram_unlock() after this because
5318 * chip reset below will undo the nvram lock.
5320 tp->nvram_lock_cnt = 0;
5322 /* GRC_MISC_CFG core clock reset will clear the memory
5323 * enable bit in PCI register 4 and the MSI enable bit
5324 * on some chips, so we save relevant registers here.
5326 tg3_save_pci_state(tp);
5328 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
5329 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
5330 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
5331 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
5332 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
5333 tw32(GRC_FASTBOOT_PC, 0);
5336 * We must avoid the readl() that normally takes place.
5337 * It locks machines, causes machine checks, and other
5338 * fun things. So, temporarily disable the 5701
5339 * hardware workaround, while we do the reset.
5341 write_op = tp->write32;
5342 if (write_op == tg3_write_flush_reg32)
5343 tp->write32 = tg3_write32;
5345 /* Prevent the irq handler from reading or writing PCI registers
5346 * during chip reset when the memory enable bit in the PCI command
5347 * register may be cleared. The chip does not generate interrupt
5348 * at this time, but the irq handler may still be called due to irq
5349 * sharing or irqpoll.
5351 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
5352 if (tp->hw_status) {
5353 tp->hw_status->status = 0;
5354 tp->hw_status->status_tag = 0;
5358 synchronize_irq(tp->pdev->irq);
5361 val = GRC_MISC_CFG_CORECLK_RESET;
5363 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
/* 0x7e2c reading 0x60 identifies a specific PCIe core revision;
 * NOTE(review): the magic values are undocumented in this excerpt.
 */
5364 if (tr32(0x7e2c) == 0x60) {
5367 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5368 tw32(GRC_MISC_CFG, (1 << 29));
/* 5906: flag a driver-initiated reset and un-halt the VCPU. */
5373 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5374 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5375 tw32(GRC_VCPU_EXT_CTRL,
5376 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5379 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5380 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5381 tw32(GRC_MISC_CFG, val);
5383 /* restore 5701 hardware bug workaround write method */
5384 tp->write32 = write_op;
5386 /* Unfortunately, we have to delay before the PCI read back.
5387 * Some 575X chips even will not respond to a PCI cfg access
5388 * when the reset command is given to the chip.
5390 * How do these hardware designers expect things to work
5391 * properly if the PCI write is posted for a long period
5392 * of time? It is always necessary to have some method by
5393 * which a register read back can occur to push the write
5394 * out which does the reset.
5396 * For most tg3 variants the trick below was working.
5401 /* Flush PCI posted writes. The normal MMIO registers
5402 * are inaccessible at this time so this is the only
5403 * way to make this reliably (actually, this is no longer
5404 * the case, see above). I tried to use indirect
5405 * register read/write but this upset some 5701 variants.
5407 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
5411 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5412 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
5416 /* Wait for link training to complete. */
5417 for (i = 0; i < 5000; i++)
5420 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
5421 pci_write_config_dword(tp->pdev, 0xc4,
5422 cfg_val | (1 << 15));
5424 /* Set PCIE max payload size and clear error status. */
5425 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
5428 tg3_restore_pci_state(tp);
5430 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
/* Re-enable the memory arbiter, which the reset disabled. */
5433 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5434 val = tr32(MEMARB_MODE);
5435 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
5437 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
5439 tw32(0x5000, 0x400);
5442 tw32(GRC_MODE, tp->grc_mode);
5444 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
5447 tw32(0xc4, val | (1 << 15));
5450 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
5451 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5452 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
5453 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
5454 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
5455 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
/* Re-establish the MAC port mode: TBI for fiber SerDes, GMII for
 * MII-SerDes, otherwise leave it cleared for tg3_setup_phy later.
 */
5458 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5459 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
5460 tw32_f(MAC_MODE, tp->mac_mode);
5461 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5462 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
5463 tw32_f(MAC_MODE, tp->mac_mode);
5465 tw32_f(MAC_MODE, 0);
5468 err = tg3_poll_fw(tp);
5472 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
5473 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5476 tw32(0x7c00, val | (1 << 25));
5479 /* Reprobe ASF enable state. */
5480 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
5481 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
5482 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
5483 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
5486 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
5487 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
5488 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
5489 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
5490 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
5497 /* tp->lock is held. */
/* Pause the ASF firmware before a halt/reset (skipped when the APE
 * manages the firmware): post FWCMD_NICDRV_PAUSE_FW in the command
 * mailbox, raise the RX-CPU driver event, then poll up to 100 times for
 * the RX CPU to acknowledge by clearing event bit 14.
 * NOTE(review): the bit set into GRC_RX_CPU_EVENT between lines 5506
 * and 5508 is not visible in this excerpt.
 */
5498 static void tg3_stop_fw(struct tg3 *tp)
5500 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
5501 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
5505 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5506 val = tr32(GRC_RX_CPU_EVENT);
5508 tw32(GRC_RX_CPU_EVENT, val);
5510 /* Wait for RX cpu to ACK the event. */
5511 for (i = 0; i < 100; i++) {
5512 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
5519 /* tp->lock is held. */
/* Halt the device: write the pre-reset signature for 'kind', abort the
 * hardware (silently if 'silent'), reset the chip, then write the legacy
 * and post-reset signatures.  Returns the tg3_chip_reset() result.
 */
5520 static int tg3_halt(struct tg3 *tp, int kind, int silent)
5526 tg3_write_sig_pre_reset(tp, kind);
5528 tg3_abort_hw(tp, silent);
5529 err = tg3_chip_reset(tp);
5531 tg3_write_sig_legacy(tp, kind);
5532 tg3_write_sig_post_reset(tp, kind);
/* Memory layout of the built-in 5701 A0 RX-CPU fixup firmware image
 * (text/rodata/data/sbss/bss bases and lengths), consumed by
 * tg3_load_5701_a0_firmware_fix() below.
 * NOTE(review): "RELASE" in TG3_FW_RELASE_MINOR is a historical typo in
 * this driver; kept as-is so any out-of-view references still compile.
 */
5540 #define TG3_FW_RELEASE_MAJOR 0x0
5541 #define TG3_FW_RELASE_MINOR 0x0
5542 #define TG3_FW_RELEASE_FIX 0x0
5543 #define TG3_FW_START_ADDR 0x08000000
5544 #define TG3_FW_TEXT_ADDR 0x08000000
5545 #define TG3_FW_TEXT_LEN 0x9c0
5546 #define TG3_FW_RODATA_ADDR 0x080009c0
5547 #define TG3_FW_RODATA_LEN 0x60
5548 #define TG3_FW_DATA_ADDR 0x08000a40
5549 #define TG3_FW_DATA_LEN 0x20
5550 #define TG3_FW_SBSS_ADDR 0x08000a60
5551 #define TG3_FW_SBSS_LEN 0xc
5552 #define TG3_FW_BSS_ADDR 0x08000a70
5553 #define TG3_FW_BSS_LEN 0x10
/* .text image of the 5701 A0 fixup firmware, written verbatim into the
 * RX-CPU scratch memory by tg3_load_firmware_cpu().  Distributed in
 * hexadecimal form under the permission notice in this file's header.
 * Opcode patterns (0x27bdffe0 stack adjusts, 0x03e00008 jr-ra) suggest
 * MIPS machine code — presumed, not documented here; do not hand-edit.
 */
5555 static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
5556 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
5557 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
5558 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
5559 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
5560 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
5561 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
5562 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
5563 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
5564 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
5565 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
5566 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
5567 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
5568 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
5569 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
5570 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
5571 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5572 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
5573 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
5574 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
5575 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5576 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
5577 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
5578 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5579 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5580 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5582 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
5583 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5584 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5585 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5586 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
5587 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
5588 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
5589 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
5590 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5591 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5592 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
5593 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5594 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5595 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5596 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
5597 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
5598 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
5599 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
5600 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
5601 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
5602 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
5603 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
5604 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
5605 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
5606 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
5607 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
5608 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
5609 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
5610 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
5611 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
5612 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
5613 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
5614 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
5615 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
5616 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
5617 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
5618 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
5619 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
5620 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
5621 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5622 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5623 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5624 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5625 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5626 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5627 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5628 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5629 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5630 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5631 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5632 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5633 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5634 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5635 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5636 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5637 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5638 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5639 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5640 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5641 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5642 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5643 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5644 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5645 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5646 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
/* .rodata image for the 5701 A0 fixup firmware — ASCII string constants
 * packed as big-endian u32 words (e.g. "5701", "MainCpuB"); loaded into
 * scratch memory alongside tg3FwText.  Do not hand-edit.
 */
5649 static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
5650 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
5651 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
5652 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5653 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
5657 #if 0 /* All zeros, don't eat up space with it. */
5658 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5659 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5660 0x00000000, 0x00000000, 0x00000000, 0x00000000
/* On-chip scratch memory regions (base/size, 16 KiB each) into which
 * tg3_load_firmware_cpu() zeroes and then writes the per-CPU firmware
 * image for the RX and TX processors.
 */
5664 #define RX_CPU_SCRATCH_BASE 0x30000
5665 #define RX_CPU_SCRATCH_SIZE 0x04000
5666 #define TX_CPU_SCRATCH_BASE 0x34000
5667 #define TX_CPU_SCRATCH_SIZE 0x04000
5669 /* tp->lock is held. */
/* Halt the on-chip RX or TX processor ('offset' selects the CPU block).
 * 5705-plus chips have no TX CPU, hence the BUG_ON; 5906 halts its VCPU
 * via GRC_VCPU_EXT_CTRL instead of the CPU_MODE register.  The halt is
 * retried up to 10000 times because the CPU may ignore the first writes;
 * on timeout an error is logged.  On success, firmware's NVRAM
 * arbitration request is cleared so the driver can use NVRAM.
 */
5670 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5674 BUG_ON(offset == TX_CPU_BASE &&
5675 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5677 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5678 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5680 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5683 if (offset == RX_CPU_BASE) {
5684 for (i = 0; i < 10000; i++) {
5685 tw32(offset + CPU_STATE, 0xffffffff);
5686 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5687 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5691 tw32(offset + CPU_STATE, 0xffffffff);
5692 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
5695 for (i = 0; i < 10000; i++) {
5696 tw32(offset + CPU_STATE, 0xffffffff);
5697 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5698 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5704 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5707 (offset == RX_CPU_BASE ? "RX" : "TX"));
5711 /* Clear firmware's nvram arbitration. */
5712 if (tp->tg3_flags & TG3_FLAG_NVRAM)
5713 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
/* Descriptor for an in-driver firmware image (struct fw_info — the
 * struct header line is outside this excerpt).  Each section gives its
 * load address, byte length, and a pointer to the u32 words; a NULL
 * data pointer means "fill with zeros" (see tg3_load_firmware_cpu()).
 */
5718 unsigned int text_base;
5719 unsigned int text_len;
5720 const u32 *text_data;
5721 unsigned int rodata_base;
5722 unsigned int rodata_len;
5723 const u32 *rodata_data;
5724 unsigned int data_base;
5725 unsigned int data_len;
5726 const u32 *data_data;
5729 /* tp->lock is held. */
/* Load a firmware image (described by 'info') into a CPU's scratch
 * memory.  Refuses TX-CPU loads on 5705-plus parts (no TX CPU there).
 * Takes the NVRAM lock before halting the CPU because bootcode may
 * still be loading; then zeroes the whole scratch region, re-asserts
 * CPU halt, and copies text/rodata/data words — sections with a NULL
 * data pointer are written as zeros.  Writes go through tg3_write_mem
 * on 5705-plus chips, indirect register writes otherwise.
 */
5730 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5731 int cpu_scratch_size, struct fw_info *info)
5733 int err, lock_err, i;
5734 void (*write_op)(struct tg3 *, u32, u32);
5736 if (cpu_base == TX_CPU_BASE &&
5737 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5738 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5739 "TX cpu firmware on %s which is 5705.\n",
5744 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5745 write_op = tg3_write_mem;
5747 write_op = tg3_write_indirect_reg32;
5749 /* It is possible that bootcode is still loading at this point.
5750 * Get the nvram lock first before halting the cpu.
5752 lock_err = tg3_nvram_lock(tp);
5753 err = tg3_halt_cpu(tp, cpu_base);
5755 tg3_nvram_unlock(tp);
/* Clear the whole scratch area before placing the image. */
5759 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5760 write_op(tp, cpu_scratch_base + i, 0);
5761 tw32(cpu_base + CPU_STATE, 0xffffffff);
5762 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
/* Copy each section; only the low 16 bits of the base select the
 * offset within scratch memory.
 */
5763 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5764 write_op(tp, (cpu_scratch_base +
5765 (info->text_base & 0xffff) +
5768 info->text_data[i] : 0));
5769 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5770 write_op(tp, (cpu_scratch_base +
5771 (info->rodata_base & 0xffff) +
5773 (info->rodata_data ?
5774 info->rodata_data[i] : 0));
5775 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5776 write_op(tp, (cpu_scratch_base +
5777 (info->data_base & 0xffff) +
5780 info->data_data[i] : 0));
5788 /* tp->lock is held. */
/* Work around a 5701 A0 erratum by loading the built-in fixup firmware
 * (tg3FwText/tg3FwRodata; .data is all zeros, hence NULL) into both CPU
 * scratch areas, then starting only the RX CPU: set its PC to the text
 * base and retry up to 5 times until the PC reads back correctly before
 * releasing it from halt.  Logs an error if the PC never sticks.
 */
5789 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5791 struct fw_info info;
5794 info.text_base = TG3_FW_TEXT_ADDR;
5795 info.text_len = TG3_FW_TEXT_LEN;
5796 info.text_data = &tg3FwText[0];
5797 info.rodata_base = TG3_FW_RODATA_ADDR;
5798 info.rodata_len = TG3_FW_RODATA_LEN;
5799 info.rodata_data = &tg3FwRodata[0];
5800 info.data_base = TG3_FW_DATA_ADDR;
5801 info.data_len = TG3_FW_DATA_LEN;
5802 info.data_data = NULL;
5804 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5805 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5810 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5811 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5816 /* Now startup only the RX cpu. */
5817 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5818 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
5820 for (i = 0; i < 5; i++) {
5821 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5823 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5824 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
5825 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
5829 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5830 "to set RX CPU PC, is %08x should be %08x\n",
5831 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
/* Release the RX CPU from halt. */
5835 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5836 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
/* Memory layout of the built-in TSO firmware image (v1.6.0), mirroring
 * the TG3_FW_* layout above for the 5701 A0 fix.
 * NOTE(review): "RELASE" is the same historical typo as in
 * TG3_FW_RELASE_MINOR; kept as-is for compatibility.
 */
5842 #define TG3_TSO_FW_RELEASE_MAJOR 0x1
5843 #define TG3_TSO_FW_RELASE_MINOR 0x6
5844 #define TG3_TSO_FW_RELEASE_FIX 0x0
5845 #define TG3_TSO_FW_START_ADDR 0x08000000
5846 #define TG3_TSO_FW_TEXT_ADDR 0x08000000
5847 #define TG3_TSO_FW_TEXT_LEN 0x1aa0
5848 #define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
5849 #define TG3_TSO_FW_RODATA_LEN 0x60
5850 #define TG3_TSO_FW_DATA_ADDR 0x08001b20
5851 #define TG3_TSO_FW_DATA_LEN 0x30
5852 #define TG3_TSO_FW_SBSS_ADDR 0x08001b50
5853 #define TG3_TSO_FW_SBSS_LEN 0x2c
5854 #define TG3_TSO_FW_BSS_ADDR 0x08001b80
5855 #define TG3_TSO_FW_BSS_LEN 0x894
5857 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5858 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5859 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5860 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5861 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5862 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5863 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5864 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5865 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5866 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5867 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5868 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5869 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5870 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5871 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5872 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5873 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5874 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5875 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5876 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5877 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5878 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5879 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5880 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5881 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5882 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5883 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5884 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5885 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5886 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5887 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5888 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5889 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5890 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5891 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5892 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5893 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5894 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5895 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5896 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5897 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5898 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5899 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5900 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5901 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5902 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5903 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5904 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5905 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5906 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5907 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5908 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5909 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5910 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5911 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5912 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5913 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5914 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5915 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5916 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5917 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5918 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5919 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5920 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5921 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5922 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5923 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5924 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5925 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5926 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5927 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5928 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5929 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5930 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5931 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5932 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5933 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5934 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5935 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5936 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5937 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5938 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5939 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5940 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5941 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5942 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5943 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5944 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5945 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5946 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5947 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5948 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5949 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5950 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5951 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5952 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5953 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5954 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5955 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5956 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5957 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5958 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5959 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5960 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5961 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5962 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5963 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5964 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5965 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5966 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5967 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5968 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5969 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5970 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5971 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5972 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5973 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5974 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5975 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5976 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5977 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5978 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5979 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5980 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5981 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5982 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5983 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5984 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5985 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5986 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5987 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5988 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5989 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5990 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5991 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5992 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5993 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5994 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5995 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5996 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5997 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5998 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5999 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
6000 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
6001 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6002 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6003 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6004 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6005 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6006 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6007 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6008 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6009 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6010 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6011 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6012 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6013 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6014 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6015 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6016 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6017 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6018 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6019 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6020 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6021 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6022 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6023 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6024 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6025 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6026 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6027 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6028 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6029 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6030 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6031 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6032 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6033 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6034 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6035 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6036 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6037 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6038 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6039 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6040 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6041 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6042 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6043 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6044 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6045 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6046 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6047 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6048 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6049 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6050 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6051 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6052 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6053 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6054 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6055 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6056 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6057 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6058 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6059 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6060 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6061 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6062 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6063 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6064 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6065 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6066 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6067 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6068 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6069 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6070 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6071 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6072 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6073 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6074 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6075 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6076 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6077 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6078 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6079 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6080 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6081 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6082 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6083 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6084 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6085 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6086 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6087 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6088 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6089 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6090 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6091 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6092 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6093 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6094 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6095 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6096 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6097 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6098 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6099 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6100 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6101 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6102 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6103 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6104 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6105 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6106 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6107 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6108 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6109 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6110 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6111 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6112 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6113 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6114 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6115 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6116 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6117 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6118 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6119 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6120 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6121 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6122 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6123 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6124 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6125 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6126 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6127 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6128 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6129 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6130 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6131 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6132 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6133 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6134 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6135 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6136 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6137 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6138 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6139 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6140 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6141 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
/* TSO firmware read-only data segment for the standard (TX-CPU) image.
 * The words are packed ASCII tags used by the firmware itself — e.g.
 * 0x4d61696e/0x43707542 = "Main"/"CpuB", 0x73746b6f/0x66666c64 =
 * "stko"/"ffld", 0x66617461/0x6c457272 = "fata"/"lErr".
 * Loaded into NIC SRAM by tg3_load_tso_firmware() (info.rodata_data);
 * do not edit these values by hand.
 */
6144 static const u32 tg3TsoFwRodata[] = {
6145 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6146 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
6147 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
6148 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
/* TSO firmware initialized-data segment for the standard (TX-CPU) image.
 * Contains the firmware's version string as packed ASCII:
 * 0x73746b6f 0x66666c64 0x5f76312e 0x362e3000 = "stkoffld_v1.6.0".
 * Loaded into NIC SRAM by tg3_load_tso_firmware() (info.data_data).
 */
6152 static const u32 tg3TsoFwData[] = {
6153 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
6154 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6158 /* 5705 needs a special version of the TSO firmware. */
/* Memory map of the 5705 TSO firmware image (v1.2.0): contiguous
 * text / rodata / data / sbss / bss segments starting at 0x00010000
 * in NIC SRAM. The ADDR/LEN pairs below must match the layout the
 * firmware was linked with — do not change them independently of the
 * tg3Tso5Fw* arrays.
 */
6159 #define TG3_TSO5_FW_RELEASE_MAJOR 0x1
/* NOTE(review): "RELASE" below is a long-standing typo of "RELEASE".
 * It is left as-is because any code elsewhere in the file that
 * references this macro uses the misspelled name; renaming would
 * break those references.
 */
6160 #define TG3_TSO5_FW_RELASE_MINOR 0x2
6161 #define TG3_TSO5_FW_RELEASE_FIX 0x0
6162 #define TG3_TSO5_FW_START_ADDR 0x00010000
6163 #define TG3_TSO5_FW_TEXT_ADDR 0x00010000
6164 #define TG3_TSO5_FW_TEXT_LEN 0xe90
6165 #define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
6166 #define TG3_TSO5_FW_RODATA_LEN 0x50
6167 #define TG3_TSO5_FW_DATA_ADDR 0x00010f00
6168 #define TG3_TSO5_FW_DATA_LEN 0x20
6169 #define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
6170 #define TG3_TSO5_FW_SBSS_LEN 0x28
6171 #define TG3_TSO5_FW_BSS_ADDR 0x00010f50
6172 #define TG3_TSO5_FW_BSS_LEN 0x88
/* TSO firmware text segment for the 5705: opaque on-chip CPU machine
 * code, stored as big-endian 32-bit words. It is copied verbatim into
 * NIC SRAM at TG3_TSO5_FW_TEXT_ADDR and executed by the RX CPU (see
 * tg3_load_tso_firmware(), which selects RX_CPU_BASE for the 5705).
 * The "+ 1" in the array size rounds up so a TEXT_LEN that is not a
 * multiple of 4 still fits. Never edit these words by hand.
 */
6174 static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
6175 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
6176 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
6177 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6178 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
6179 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
6180 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
6181 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6182 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
6183 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
6184 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
6185 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
6186 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
6187 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
6188 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
6189 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
6190 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
6191 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
6192 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
6193 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
6194 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
6195 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
6196 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
6197 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
6198 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
6199 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
6200 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
6201 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
6202 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
6203 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
6204 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
6205 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6206 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
6207 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
6208 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6209 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6210 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6211 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6212 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6213 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6214 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6215 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6216 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6217 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6218 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6219 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6220 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6221 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6222 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6223 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6224 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6225 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6226 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6227 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6228 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6229 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6230 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6231 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6232 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6233 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6234 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6235 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6236 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6237 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6238 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6239 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6240 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6241 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6242 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6243 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6244 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6245 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6246 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6247 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6248 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6249 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6250 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6251 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6252 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6253 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6254 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6255 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6256 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6257 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6258 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6259 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6260 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6261 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6262 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6263 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6264 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6265 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6266 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6267 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6268 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6269 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6270 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6271 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6272 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6273 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6274 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6275 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6276 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6277 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6278 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6279 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6280 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6281 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6282 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6283 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6284 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6285 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6286 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6287 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6288 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6289 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6290 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6291 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6292 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6293 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6294 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6295 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6296 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6297 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6298 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6299 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6300 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6301 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6302 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6303 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6304 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6305 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6306 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6307 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6308 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6309 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6310 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6311 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6312 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6313 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6314 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6315 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6316 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6317 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6318 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6319 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6320 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6321 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6322 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6323 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6324 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6325 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6326 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6327 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6328 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6329 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6330 0x00000000, 0x00000000, 0x00000000,
/* TSO firmware read-only data segment for the 5705 image, loaded at
 * TG3_TSO5_FW_RODATA_ADDR. Packed ASCII tags used by the firmware:
 * "Main"/"CpuB", "Main"/"CpuA", "stko"/"ffld", "fata"/"lErr".
 * Do not edit by hand.
 */
6333 static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
6334 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6335 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
6336 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6337 0x00000000, 0x00000000, 0x00000000,
/* TSO firmware initialized-data segment for the 5705 image, loaded at
 * TG3_TSO5_FW_DATA_ADDR. Contains the version string as packed ASCII:
 * 0x73746b6f 0x66666c64 0x5f76312e 0x322e3000 = "stkoffld_v1.2.0"
 * (note: v1.2.0 here vs. v1.6.0 in the non-5705 tg3TsoFwData above).
 */
6340 static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
6341 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
6342 0x00000000, 0x00000000, 0x00000000,
6345 /* tp->lock is held. */
/* Load the TSO offload firmware into the appropriate on-chip CPU and
 * start it running. Returns 0 on success, negative errno on failure
 * (error paths elided in this view — TODO confirm against full source).
 * Skipped entirely when the chip does hardware TSO (TG3_FLG2_HW_TSO).
 */
6346 static int tg3_load_tso_firmware(struct tg3 *tp)
6348 struct fw_info info;
6349 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6352 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
/* 5705 runs the special TSO5 image on the RX CPU, using the MBUF pool
 * area as scratch space; all other chips run the standard image on the
 * TX CPU with its dedicated scratch region.
 */
6355 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6356 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6357 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6358 info.text_data = &tg3Tso5FwText[0];
6359 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6360 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6361 info.rodata_data = &tg3Tso5FwRodata[0];
6362 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6363 info.data_len = TG3_TSO5_FW_DATA_LEN;
6364 info.data_data = &tg3Tso5FwData[0];
6365 cpu_base = RX_CPU_BASE;
6366 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
/* Scratch size = sum of the firmware segment lengths (some terms
 * elided in this view — presumably rodata/data lengths too).
 */
6367 cpu_scratch_size = (info.text_len +
6370 TG3_TSO5_FW_SBSS_LEN +
6371 TG3_TSO5_FW_BSS_LEN);
6373 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6374 info.text_len = TG3_TSO_FW_TEXT_LEN;
6375 info.text_data = &tg3TsoFwText[0];
6376 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6377 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6378 info.rodata_data = &tg3TsoFwRodata[0];
6379 info.data_base = TG3_TSO_FW_DATA_ADDR;
6380 info.data_len = TG3_TSO_FW_DATA_LEN;
6381 info.data_data = &tg3TsoFwData[0];
6382 cpu_base = TX_CPU_BASE;
6383 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6384 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
/* Copy the text/rodata/data segments into NIC SRAM. */
6387 err = tg3_load_firmware_cpu(tp, cpu_base,
6388 cpu_scratch_base, cpu_scratch_size,
6393 /* Now startup the cpu. */
6394 tw32(cpu_base + CPU_STATE, 0xffffffff);
6395 tw32_f(cpu_base + CPU_PC, info.text_base);
/* Verify the CPU actually latched the new program counter; retry a few
 * times, halting the CPU and re-writing the PC on each attempt.
 */
6397 for (i = 0; i < 5; i++) {
6398 if (tr32(cpu_base + CPU_PC) == info.text_base)
6400 tw32(cpu_base + CPU_STATE, 0xffffffff);
6401 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
6402 tw32_f(cpu_base + CPU_PC, info.text_base);
/* All retries exhausted: report the stuck PC value. */
6406 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6407 "to set CPU PC, is %08x should be %08x\n",
6408 tp->dev->name, tr32(cpu_base + CPU_PC),
/* PC verified — clear state and release the CPU from halt (mode 0). */
6412 tw32(cpu_base + CPU_STATE, 0xffffffff);
6413 tw32_f(cpu_base + CPU_MODE, 0x00000000);
6418 /* tp->lock is held. */
/* Program the device's unicast MAC address into the MAC address
 * registers. The 6-byte dev_addr is split into a 16-bit high half
 * (bytes 0-1) and a 32-bit low half (bytes 2-5), written to all four
 * MAC_ADDR_{0..3} slots; slot 1 is skipped when skip_mac_1 is set
 * (caller detected ASF firmware using that slot — see
 * tg3_set_mac_addr()).
 */
6419 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
6421 u32 addr_high, addr_low;
6424 addr_high = ((tp->dev->dev_addr[0] << 8) |
6425 tp->dev->dev_addr[1]);
6426 addr_low = ((tp->dev->dev_addr[2] << 24) |
6427 (tp->dev->dev_addr[3] << 16) |
6428 (tp->dev->dev_addr[4] << 8) |
6429 (tp->dev->dev_addr[5] << 0));
6430 for (i = 0; i < 4; i++) {
6431 if (i == 1 && skip_mac_1)
6433 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6434 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
/* 5703/5704 have 12 additional extended address slots that must also
 * carry the address.
 */
6437 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6438 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6439 for (i = 0; i < 12; i++) {
6440 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6441 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
/* Seed the TX backoff random generator from the byte-sum of the MAC
 * address (masked to the seed field width) so different stations pick
 * different backoff sequences.
 */
6445 addr_high = (tp->dev->dev_addr[0] +
6446 tp->dev->dev_addr[1] +
6447 tp->dev->dev_addr[2] +
6448 tp->dev->dev_addr[3] +
6449 tp->dev->dev_addr[4] +
6450 tp->dev->dev_addr[5]) &
6451 TX_BACKOFF_SEED_MASK;
6452 tw32(MAC_TX_BACKOFF_SEED, addr_high);
/* net_device ndo-style MAC address change handler. Validates the new
 * address, copies it into dev->dev_addr, and — if the interface is
 * running — programs it into the hardware under tp->lock. Returns 0 on
 * success (the invalid-address error return is elided in this view).
 */
6455 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6457 struct tg3 *tp = netdev_priv(dev);
6458 struct sockaddr *addr = p;
6459 int err = 0, skip_mac_1 = 0;
6461 if (!is_valid_ether_addr(addr->sa_data))
6464 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
/* Interface down: nothing to program now; the address is picked up on
 * the next open.
 */
6466 if (!netif_running(dev))
/* With ASF management firmware active, MAC slot 1 may belong to ASF:
 * if slot 1 differs from slot 0 and is non-zero, ASF is using it, so
 * leave it alone when reprogramming.
 */
6469 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6470 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6472 addr0_high = tr32(MAC_ADDR_0_HIGH);
6473 addr0_low = tr32(MAC_ADDR_0_LOW);
6474 addr1_high = tr32(MAC_ADDR_1_HIGH);
6475 addr1_low = tr32(MAC_ADDR_1_LOW);
6477 /* Skip MAC addr 1 if ASF is using it. */
6478 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6479 !(addr1_high == 0 && addr1_low == 0))
6482 spin_lock_bh(&tp->lock);
6483 __tg3_set_mac_addr(tp, skip_mac_1);
6484 spin_unlock_bh(&tp->lock);
6489 /* tp->lock is held. */
/* Write one buffer-descriptor-info (BDINFO) structure in NIC SRAM:
 * 64-bit host DMA address (split into HIGH/LOW 32-bit register words),
 * the maxlen/flags word, and — on pre-5705 chips only — the NIC-local
 * ring address (5705+ parts do not use TG3_BDINFO_NIC_ADDR).
 */
6490 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6491 dma_addr_t mapping, u32 maxlen_flags,
6495 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6496 ((u64) mapping >> 32));
6498 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6499 ((u64) mapping & 0xffffffff));
6501 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6504 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6506 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6510 static void __tg3_set_rx_mode(struct net_device *);
/* Program the host coalescing engine from an ethtool_coalesce request:
 * RX/TX tick and max-frame thresholds unconditionally, the during-IRQ
 * tick variants only on pre-5705 chips, and the statistics block
 * coalescing ticks (forced to a quiesced value when the link carrier
 * is down — the exact elided value TODO confirm) on pre-5705 chips.
 */
6511 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6513 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6514 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6515 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6516 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
/* The in-interrupt tick registers exist only on pre-5705 silicon. */
6517 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6518 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6519 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6521 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6522 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6523 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6524 u32 val = ec->stats_block_coalesce_usecs;
6526 if (!netif_carrier_ok(tp->dev))
6529 tw32(HOSTCC_STAT_COAL_TICKS, val);
6533 /* tp->lock is held. */
/* Bring the chip from reset to a fully programmed state: chip reset,
 * CPMU/clock workarounds, buffer-manager pool setup, RX/TX ring BDINFO
 * programming, DMA engine modes, MAC configuration, firmware loads,
 * and finally PHY setup.  Returns 0 on success or a negative error
 * from one of the reset/init/firmware/PHY steps.
 * NOTE(review): this extracted view is missing interleaved lines of the
 * original file (error checks, udelay()s, else branches, closing
 * braces); do not treat the visible control flow as complete.
 */
6534 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6536 u32 val, rdmac_mode;
6539 tg3_disable_ints(tp);
6543 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6545 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6546 tg3_abort_hw(tp, 1);
6552 err = tg3_chip_reset(tp);
6556 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
/* 5784 A0/A1 CPMU workarounds: disable link-aware/idle power modes and
 * force the 6.25MHz core clock selections.
 */
6558 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
6559 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
6560 val = tr32(TG3_CPMU_CTRL);
6561 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6562 tw32(TG3_CPMU_CTRL, val);
6564 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
6565 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
6566 val |= CPMU_LSPD_10MB_MACCLK_6_25;
6567 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
6569 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
6570 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
6571 val |= CPMU_LNK_AWARE_MACCLK_6_25;
6572 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
6574 val = tr32(TG3_CPMU_HST_ACC);
6575 val &= ~CPMU_HST_ACC_MACCLK_MASK;
6576 val |= CPMU_HST_ACC_MACCLK_6_25;
6577 tw32(TG3_CPMU_HST_ACC, val);
6580 /* This works around an issue with Athlon chipsets on
6581 * B3 tigon3 silicon. This bit has no effect on any
6582 * other revision. But do not set this on PCI Express
6583 * chips and don't even touch the clocks if the CPMU is present.
6585 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6586 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6587 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6588 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
/* 5704 A0 in PCI-X mode: force retries to reuse the same DMA address. */
6591 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6592 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6593 val = tr32(TG3PCI_PCISTATE);
6594 val |= PCISTATE_RETRY_SAME_DMA;
6595 tw32(TG3PCI_PCISTATE, val);
6598 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6599 /* Allow reads and writes to the
6600 * APE register and memory space.
6602 val = tr32(TG3PCI_PCISTATE);
6603 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6604 PCISTATE_ALLOW_APE_SHMEM_WR;
6605 tw32(TG3PCI_PCISTATE, val);
6608 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6609 /* Enable some hw fixes. */
6610 val = tr32(TG3PCI_MSI_DATA);
6611 val |= (1 << 26) | (1 << 28) | (1 << 29);
6612 tw32(TG3PCI_MSI_DATA, val);
6615 /* Descriptor ring init may make accesses to the
6616 * NIC SRAM area to setup the TX descriptors, so we
6617 * can only do this after the hardware has been
6618 * successfully reset.
6620 err = tg3_init_rings(tp);
6624 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6625 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
6626 /* This value is determined during the probe time DMA
6627 * engine test, tg3_test_dma.
6629 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6632 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6633 GRC_MODE_4X_NIC_SEND_RINGS |
6634 GRC_MODE_NO_TX_PHDR_CSUM |
6635 GRC_MODE_NO_RX_PHDR_CSUM);
6636 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6638 /* Pseudo-header checksum is done by hardware logic and not
6639 * the offload processers, so make the chip do the pseudo-
6640 * header checksums on receive. For transmit it is more
6641 * convenient to do the pseudo-header checksum in software
6642 * as Linux does that on transmit for us in all cases.
6644 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6648 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6650 /* Setup the timer prescalar register. Clock is always 66Mhz. */
6651 val = tr32(GRC_MISC_CFG);
6653 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6654 tw32(GRC_MISC_CFG, val);
6656 /* Initialize MBUF/DESC pool. */
6657 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6659 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6660 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6661 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6662 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6664 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6665 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6666 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6668 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
/* Carve the TSO firmware image out of the 5705 mbuf pool; round the
 * firmware footprint up to a 0x80-byte boundary first.
 */
6671 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6672 TG3_TSO5_FW_RODATA_LEN +
6673 TG3_TSO5_FW_DATA_LEN +
6674 TG3_TSO5_FW_SBSS_LEN +
6675 TG3_TSO5_FW_BSS_LEN);
6676 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6677 tw32(BUFMGR_MB_POOL_ADDR,
6678 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6679 tw32(BUFMGR_MB_POOL_SIZE,
6680 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
/* Buffer manager watermarks: standard vs jumbo MTU profiles. */
6683 if (tp->dev->mtu <= ETH_DATA_LEN) {
6684 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6685 tp->bufmgr_config.mbuf_read_dma_low_water);
6686 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6687 tp->bufmgr_config.mbuf_mac_rx_low_water);
6688 tw32(BUFMGR_MB_HIGH_WATER,
6689 tp->bufmgr_config.mbuf_high_water);
6691 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6692 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6693 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6694 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6695 tw32(BUFMGR_MB_HIGH_WATER,
6696 tp->bufmgr_config.mbuf_high_water_jumbo);
6698 tw32(BUFMGR_DMA_LOW_WATER,
6699 tp->bufmgr_config.dma_low_water);
6700 tw32(BUFMGR_DMA_HIGH_WATER,
6701 tp->bufmgr_config.dma_high_water);
/* Enable the buffer manager and poll (up to 2000 iterations) for the
 * enable bit to latch; log an error if it never does.
 */
6703 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6704 for (i = 0; i < 2000; i++) {
6705 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6710 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6715 /* Setup replenish threshold. */
6716 val = tp->rx_pending / 8;
6719 else if (val > tp->rx_std_max_post)
6720 val = tp->rx_std_max_post;
6721 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6722 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6723 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6725 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6726 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6729 tw32(RCVBDI_STD_THRESH, val);
6731 /* Initialize TG3_BDINFO's at:
6732 * RCVDBDI_STD_BD: standard eth size rx ring
6733 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
6734 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
6737 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
6738 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
6739 * ring attribute flags
6740 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
6742 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6743 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6745 * The size of each ring is fixed in the firmware, but the location is
6748 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6749 ((u64) tp->rx_std_mapping >> 32));
6750 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6751 ((u64) tp->rx_std_mapping & 0xffffffff));
6752 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6753 NIC_SRAM_RX_BUFFER_DESC);
6755 /* Don't even try to program the JUMBO/MINI buffer descriptor
6758 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6759 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6760 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6762 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6763 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6765 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6766 BDINFO_FLAGS_DISABLED);
6768 /* Setup replenish threshold. */
6769 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6771 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6772 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6773 ((u64) tp->rx_jumbo_mapping >> 32));
6774 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6775 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6776 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6777 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6778 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6779 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6781 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6782 BDINFO_FLAGS_DISABLED);
6787 /* There is only one send ring on 5705/5750, no need to explicitly
6788 * disable the others.
6790 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6791 /* Clear out send RCB ring in SRAM. */
6792 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6793 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6794 BDINFO_FLAGS_DISABLED);
/* Zero the TX producer mailboxes, then point the send RCB at the host
 * TX descriptor ring.
 */
6799 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6800 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6802 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6803 tp->tx_desc_mapping,
6804 (TG3_TX_RING_SIZE <<
6805 BDINFO_FLAGS_MAXLEN_SHIFT),
6806 NIC_SRAM_TX_BUFFER_DESC);
6808 /* There is only one receive return ring on 5705/5750, no need
6809 * to explicitly disable the others.
6811 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6812 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6813 i += TG3_BDINFO_SIZE) {
6814 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6815 BDINFO_FLAGS_DISABLED);
6820 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6822 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6824 (TG3_RX_RCB_RING_SIZE(tp) <<
6825 BDINFO_FLAGS_MAXLEN_SHIFT),
/* Publish the initial RX producer indices (jumbo only if enabled). */
6828 tp->rx_std_ptr = tp->rx_pending;
6829 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6832 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6833 tp->rx_jumbo_pending : 0;
6834 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6837 /* Initialize MAC address and backoff seed. */
6838 __tg3_set_mac_addr(tp, 0);
6840 /* MTU + ethernet header + FCS + optional VLAN tag */
6841 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6843 /* The slot time is changed by tg3_setup_phy if we
6844 * run at gigabit with half duplex.
6846 tw32(MAC_TX_LENGTHS,
6847 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6848 (6 << TX_LENGTHS_IPG_SHIFT) |
6849 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6851 /* Receive rules. */
6852 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6853 tw32(RCVLPC_CONFIG, 0x0181);
6855 /* Calculate RDMAC_MODE setting early, we need it to determine
6856 * the RCVLPC_STATE_ENABLE mask.
6858 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6859 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6860 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6861 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6862 RDMAC_MODE_LNGREAD_ENAB);
6864 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
6865 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6866 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6867 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
6869 /* If statement applies to 5705 and 5750 PCI devices only */
6870 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6871 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6872 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6873 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6874 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6875 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6876 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6877 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6878 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6882 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6883 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6885 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6886 rdmac_mode |= (1 << 27);
6888 /* Receive/send statistics. */
6889 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6890 val = tr32(RCVLPC_STATS_ENABLE);
6891 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6892 tw32(RCVLPC_STATS_ENABLE, val);
6893 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6894 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6895 val = tr32(RCVLPC_STATS_ENABLE);
6896 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6897 tw32(RCVLPC_STATS_ENABLE, val);
6899 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6901 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6902 tw32(SNDDATAI_STATSENAB, 0xffffff);
6903 tw32(SNDDATAI_STATSCTRL,
6904 (SNDDATAI_SCTRL_ENABLE |
6905 SNDDATAI_SCTRL_FASTUPD));
6907 /* Setup host coalescing engine. */
6908 tw32(HOSTCC_MODE, 0);
6909 for (i = 0; i < 2000; i++) {
6910 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6915 __tg3_set_coalesce(tp, &tp->coal);
6917 /* set status block DMA address */
6918 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6919 ((u64) tp->status_mapping >> 32));
6920 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6921 ((u64) tp->status_mapping & 0xffffffff));
6923 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6924 /* Status/statistics block address. See tg3_timer,
6925 * the tg3_periodic_fetch_stats call there, and
6926 * tg3_get_stats to see how this works for 5705/5750 chips.
6928 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6929 ((u64) tp->stats_mapping >> 32));
6930 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6931 ((u64) tp->stats_mapping & 0xffffffff));
6932 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6933 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6936 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6938 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6939 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6940 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6941 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6943 /* Clear statistics/status block in chip, and status block in ram. */
6944 for (i = NIC_SRAM_STATS_BLK;
6945 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6947 tg3_write_mem(tp, i, 0);
6950 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6952 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6953 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6954 /* reset to prevent losing 1st rx packet intermittently */
6955 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6959 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6960 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6961 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6962 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6963 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
6964 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6965 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6968 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6969 * If TG3_FLG2_IS_NIC is zero, we should read the
6970 * register to preserve the GPIO settings for LOMs. The GPIOs,
6971 * whether used as inputs or outputs, are set by boot code after
6974 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
6977 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
6978 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
6979 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
6981 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6982 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6983 GRC_LCLCTRL_GPIO_OUTPUT3;
6985 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6986 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6988 tp->grc_local_ctrl &= ~gpio_mask;
6989 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6991 /* GPIO1 must be driven high for eeprom write protect */
6992 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
6993 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6994 GRC_LCLCTRL_GPIO_OUTPUT1);
6996 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6999 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
7002 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7003 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
/* Write-DMA engine mode bits, with per-revision adjustments below. */
7007 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7008 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7009 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7010 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7011 WDMAC_MODE_LNGREAD_ENAB);
7013 /* If statement applies to 5705 and 5750 PCI devices only */
7014 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7015 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7016 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
/* NOTE(review): the next test uses a TG3_FLG2_* constant against
 * tp->tg3_flags; everywhere else in this function TG3_FLG2_* bits are
 * tested against tp->tg3_flags2 (e.g. line 6873 above).  Suspected
 * flag-set mismatch -- verify against the upstream tg3 driver.
 */
7017 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
7018 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7019 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7021 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7022 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7023 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7024 val |= WDMAC_MODE_RX_ACCEL;
7028 /* Enable host coalescing bug fix */
7029 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
7030 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
7031 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
7032 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761))
7035 tw32_f(WDMAC_MODE, val);
/* PCI-X: bump the maximum read byte count (2K) via config space. */
7038 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7041 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7043 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7044 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7045 pcix_cmd |= PCI_X_CMD_READ_2K;
7046 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7047 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7048 pcix_cmd |= PCI_X_CMD_READ_2K;
7050 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7054 tw32_f(RDMAC_MODE, rdmac_mode);
/* Enable the remaining send/receive engine blocks. */
7057 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7058 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7059 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7061 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7063 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7065 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7067 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7068 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7069 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7070 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7071 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7072 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7073 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7074 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
/* Load on-chip firmware fixes where the silicon revision needs them. */
7076 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7077 err = tg3_load_5701_a0_firmware_fix(tp);
7082 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7083 err = tg3_load_tso_firmware(tp);
7088 tp->tx_mode = TX_MODE_ENABLE;
7089 tw32_f(MAC_TX_MODE, tp->tx_mode);
7092 tp->rx_mode = RX_MODE_ENABLE;
7093 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7094 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7095 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7097 tw32_f(MAC_RX_MODE, tp->rx_mode);
/* Restore the link configuration saved before a low-power transition. */
7100 if (tp->link_config.phy_is_low_power) {
7101 tp->link_config.phy_is_low_power = 0;
7102 tp->link_config.speed = tp->link_config.orig_speed;
7103 tp->link_config.duplex = tp->link_config.orig_duplex;
7104 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7107 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
7108 tw32_f(MAC_MI_MODE, tp->mi_mode);
7111 tw32(MAC_LED_CTRL, tp->led_ctrl);
7113 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7114 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7115 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7118 tw32_f(MAC_RX_MODE, tp->rx_mode);
7121 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7122 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7123 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7124 /* Set drive transmission level to 1.2V */
7125 /* only if the signal pre-emphasis bit is not set */
7126 val = tr32(MAC_SERDES_CFG);
7129 tw32(MAC_SERDES_CFG, val);
7131 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7132 tw32(MAC_SERDES_CFG, 0x616000);
7135 /* Prevent chip from dropping frames when flow control
7138 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7140 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7141 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7142 /* Use hardware link auto-negotiation */
7143 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7146 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7147 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7150 tmp = tr32(SERDES_RX_CTRL);
7151 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7152 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7153 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7154 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7157 err = tg3_setup_phy(tp, 0);
7161 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7162 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7165 /* Clear CRC stats. */
7166 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7167 tg3_writephy(tp, MII_TG3_TEST1,
7168 tmp | MII_TG3_TEST1_CRC_EN);
7169 tg3_readphy(tp, 0x14, &tmp);
7173 __tg3_set_rx_mode(tp->dev);
7175 /* Initialize receive rules. */
7176 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
7177 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7178 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
7179 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7181 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7182 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7186 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
/* Clear all unused receive rule slots (high slots first; a switch-like
 * cascade in the original selects how many slots exist per chip).
 */
7190 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
7192 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
7194 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
7196 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
7198 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
7200 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
7202 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
7204 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
7206 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
7208 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
7210 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
7212 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
7214 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
7216 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
7224 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7225 /* Write our heartbeat update interval to APE. */
7226 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7227 APE_HOST_HEARTBEAT_INT_DISABLE);
7229 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7234 /* Called at device open time to get the chip ready for
7235 * packet processing. Invoked with tp->lock held.
/* Powers the chip to D0, switches clocks, zeroes the memory window
 * base, then delegates the full programming sequence to tg3_reset_hw().
 * Returns 0 on success or a negative error from the power-state or
 * reset steps (error-check lines are elided in this extracted view).
 */
7237 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7241 /* Force the chip into D0. */
7242 err = tg3_set_power_state(tp, PCI_D0);
7246 tg3_switch_clocks(tp);
7248 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7250 err = tg3_reset_hw(tp, reset_phy);
/* Accumulate a 32-bit hardware counter (read via tr32(REG)) into a
 * 64-bit software counter split as PSTAT->low/PSTAT->high: if adding
 * the value wraps ->low (sum < addend), carry 1 into ->high.
 * Multi-statement macro; the closing "} while (0)" is outside this
 * extracted view.
 */
7256 #define TG3_STAT_ADD32(PSTAT, REG) \
7257 do { u32 __val = tr32(REG); \
7258 (PSTAT)->low += __val; \
7259 if ((PSTAT)->low < __val) \
7260 (PSTAT)->high += 1; \
/* Fold the chip's TX/RX MAC statistics registers into the driver's
 * tg3_hw_stats block via TG3_STAT_ADD32 (32-bit reads accumulated into
 * 64-bit counters).  Skips the work when the link carrier is down
 * (the early-return line after the carrier check is elided in this
 * extracted view).
 */
7263 static void tg3_periodic_fetch_stats(struct tg3 *tp)
7265 struct tg3_hw_stats *sp = tp->hw_stats;
7267 if (!netif_carrier_ok(tp->dev))
/* Transmit-side MAC counters. */
7270 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7271 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7272 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7273 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7274 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7275 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7276 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7277 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7278 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7279 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7280 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7281 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7282 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
/* Receive-side MAC counters. */
7284 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7285 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7286 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7287 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7288 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7289 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7290 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7291 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7292 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7293 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7294 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7295 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7296 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7297 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
/* Receive list placement counters (BD exhaustion / discards / errors). */
7299 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7300 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7301 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
/* Periodic driver timer (timer callback; tp passed as the opaque arg).
 * Under tp->lock it: kicks the chip for non-tagged IRQ status races,
 * detects a wedged write-DMA engine and schedules the reset task,
 * fetches stats / polls link state once per second (timer_counter),
 * and sends the ASF firmware heartbeat every 2 seconds (asf_counter).
 * Finally re-arms itself at jiffies + tp->timer_offset.
 * NOTE(review): this extracted view is missing interleaved lines
 * (restart label, closing braces, some writes), so the visible control
 * flow is not complete.
 */
7304 static void tg3_timer(unsigned long __opaque)
7306 struct tg3 *tp = (struct tg3 *) __opaque;
7311 spin_lock(&tp->lock);
7313 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7314 /* All of this garbage is because when using non-tagged
7315 * IRQ status the mailbox/status_block protocol the chip
7316 * uses with the cpu is race prone.
7318 if (tp->hw_status->status & SD_STATUS_UPDATED) {
7319 tw32(GRC_LOCAL_CTRL,
7320 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7322 tw32(HOSTCC_MODE, tp->coalesce_mode |
7323 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
/* Write-DMA engine no longer enabled: chip is wedged; hand recovery to
 * the reset workqueue task (cannot reset from timer context).
 */
7326 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7327 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
7328 spin_unlock(&tp->lock);
7329 schedule_work(&tp->reset_task);
7334 /* This part only runs once per second. */
7335 if (!--tp->timer_counter) {
7336 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7337 tg3_periodic_fetch_stats(tp);
7339 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7343 mac_stat = tr32(MAC_STATUS);
7346 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7347 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7349 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7353 tg3_setup_phy(tp, 0);
7354 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7355 u32 mac_stat = tr32(MAC_STATUS);
7358 if (netif_carrier_ok(tp->dev) &&
7359 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7362 if (! netif_carrier_ok(tp->dev) &&
7363 (mac_stat & (MAC_STATUS_PCS_SYNCED |
7364 MAC_STATUS_SIGNAL_DET))) {
7368 if (!tp->serdes_counter) {
7371 ~MAC_MODE_PORT_MODE_MASK));
7373 tw32_f(MAC_MODE, tp->mac_mode);
7376 tg3_setup_phy(tp, 0);
7378 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7379 tg3_serdes_parallel_detect(tp);
7381 tp->timer_counter = tp->timer_multiplier;
7384 /* Heartbeat is only sent once every 2 seconds.
7386 * The heartbeat is to tell the ASF firmware that the host
7387 * driver is still alive. In the event that the OS crashes,
7388 * ASF needs to reset the hardware to free up the FIFO space
7389 * that may be filled with rx packets destined for the host.
7390 * If the FIFO is full, ASF will no longer function properly.
7392 * Unintended resets have been reported on real time kernels
7393 * where the timer doesn't run on time. Netpoll will also have
7396 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7397 * to check the ring condition when the heartbeat is expiring
7398 * before doing the reset. This will prevent most unintended
7401 if (!--tp->asf_counter) {
7402 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7405 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
7406 FWCMD_NICDRV_ALIVE3);
7407 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
7408 /* 5 seconds timeout */
7409 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
7410 val = tr32(GRC_RX_CPU_EVENT);
7412 tw32(GRC_RX_CPU_EVENT, val);
7414 tp->asf_counter = tp->asf_multiplier;
7417 spin_unlock(&tp->lock);
/* Re-arm for the next tick. */
7420 tp->timer.expires = jiffies + tp->timer_offset;
7421 add_timer(&tp->timer);
/* Register the device interrupt handler with request_irq(), choosing
 * handler and flags by mode: MSI gets IRQF_SAMPLE_RANDOM only (no
 * sharing); legacy INTx gets IRQF_SHARED | IRQF_SAMPLE_RANDOM, with
 * the tagged-status handler variant when TAGGED_STATUS is set.
 * Returns request_irq()'s result (0 on success, negative errno).
 * NOTE(review): the handler-selection assignments for the MSI/default
 * cases are elided in this extracted view.
 */
7424 static int tg3_request_irq(struct tg3 *tp)
7427 unsigned long flags;
7428 struct net_device *dev = tp->dev;
7430 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7432 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7434 flags = IRQF_SAMPLE_RANDOM;
7437 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7438 fn = tg3_interrupt_tagged;
7439 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7441 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
/* Verify that the chip can actually raise an interrupt: swap in the
 * tg3_test_isr handler, force a coalescing-now event, and poll up to
 * five times for either a non-zero interrupt mailbox or the masked-PCI-
 * interrupt bit.  Afterwards restores the normal handler via
 * tg3_request_irq().  Used by the MSI sanity test (tg3_test_msi).
 * NOTE(review): delay/return lines between the visible statements are
 * elided in this extracted view.
 */
7444 static int tg3_test_interrupt(struct tg3 *tp)
7446 struct net_device *dev = tp->dev;
7447 int err, i, intr_ok = 0;
7449 if (!netif_running(dev))
7452 tg3_disable_ints(tp);
7454 free_irq(tp->pdev->irq, dev);
7456 err = request_irq(tp->pdev->irq, tg3_test_isr,
7457 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
7461 tp->hw_status->status &= ~SD_STATUS_UPDATED;
7462 tg3_enable_ints(tp);
/* Force an immediate coalescing event so an interrupt should fire. */
7464 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7467 for (i = 0; i < 5; i++) {
7468 u32 int_mbox, misc_host_ctrl;
7470 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
7472 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
7474 if ((int_mbox != 0) ||
7475 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
7483 tg3_disable_ints(tp);
7485 free_irq(tp->pdev->irq, dev);
7487 err = tg3_request_irq(tp);
7498 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
7499 * successfully restored
/* Runs tg3_test_interrupt() with SERR reporting temporarily masked in
 * PCI_COMMAND (an MSI cycle may end in Master Abort).  On failure,
 * falls back to INTx: warns, disables MSI, re-requests the IRQ, and
 * resets/reinitializes the chip under the full lock.
 */
7501 static int tg3_test_msi(struct tg3 *tp)
7503 struct net_device *dev = tp->dev;
7507 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
7510 /* Turn off SERR reporting in case MSI terminates with Master
7513 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7514 pci_write_config_word(tp->pdev, PCI_COMMAND,
7515 pci_cmd & ~PCI_COMMAND_SERR);
7517 err = tg3_test_interrupt(tp);
7519 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7524 /* other failures */
7528 /* MSI test failed, go back to INTx mode */
7529 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
7530 "switching to INTx mode. Please report this failure to "
7531 "the PCI maintainer and include system chipset information.\n",
7534 free_irq(tp->pdev->irq, dev);
7535 pci_disable_msi(tp->pdev);
7537 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7539 err = tg3_request_irq(tp);
7543 /* Need to reset the chip because the MSI cycle may have terminated
7544 * with Master Abort.
7546 tg3_full_lock(tp, 1);
7548 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7549 err = tg3_init_hw(tp, 1);
7551 tg3_full_unlock(tp);
/* Re-init failed: release the just-acquired IRQ before returning. */
7554 free_irq(tp->pdev->irq, dev);
/* net_device open() entry point: power the chip to D0, allocate DMA-
 * consistent rings/blocks, optionally enable MSI (only on chips with
 * tagged status), request the IRQ, initialize the hardware, validate
 * MSI delivery, start the periodic timer, enable interrupts, and start
 * the TX queue.  Later statements are error-unwind paths that release
 * IRQ/MSI/memory in reverse order.
 * NOTE(review): goto labels, error checks, and closing braces between
 * the visible statements are elided in this extracted view.
 */
7559 static int tg3_open(struct net_device *dev)
7561 struct tg3 *tp = netdev_priv(dev);
7564 netif_carrier_off(tp->dev);
7566 tg3_full_lock(tp, 0);
7568 err = tg3_set_power_state(tp, PCI_D0);
7570 tg3_full_unlock(tp);
7574 tg3_disable_ints(tp);
7575 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
7577 tg3_full_unlock(tp);
7579 /* The placement of this call is tied
7580 * to the setup and use of Host TX descriptors.
7582 err = tg3_alloc_consistent(tp);
7586 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
7587 /* All MSI supporting chips should support tagged
7588 * status. Assert that this is the case.
7590 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7591 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
7592 "Not using MSI.\n", tp->dev->name);
7593 } else if (pci_enable_msi(tp->pdev) == 0) {
7596 msi_mode = tr32(MSGINT_MODE);
7597 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
7598 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
7601 err = tg3_request_irq(tp);
/* IRQ request failed: undo MSI enable and free the DMA memory. */
7604 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7605 pci_disable_msi(tp->pdev);
7606 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7608 tg3_free_consistent(tp);
7612 napi_enable(&tp->napi);
7614 tg3_full_lock(tp, 0);
7616 err = tg3_init_hw(tp, 1);
7618 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
/* Timer cadence: 1s ticks with tagged status, 100ms otherwise; the ASF
 * heartbeat fires every 2 seconds worth of ticks.
 */
7621 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7622 tp->timer_offset = HZ;
7624 tp->timer_offset = HZ / 10;
7626 BUG_ON(tp->timer_offset > HZ);
7627 tp->timer_counter = tp->timer_multiplier =
7628 (HZ / tp->timer_offset);
7629 tp->asf_counter = tp->asf_multiplier =
7630 ((HZ / tp->timer_offset) * 2);
7632 init_timer(&tp->timer);
7633 tp->timer.expires = jiffies + tp->timer_offset;
7634 tp->timer.data = (unsigned long) tp;
7635 tp->timer.function = tg3_timer;
7638 tg3_full_unlock(tp);
/* Init-failure unwind: stop NAPI, release IRQ/MSI, free DMA memory. */
7641 napi_disable(&tp->napi);
7642 free_irq(tp->pdev->irq, dev);
7643 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7644 pci_disable_msi(tp->pdev);
7645 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7647 tg3_free_consistent(tp);
/* Confirm MSI actually delivers an interrupt before committing to it. */
7651 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7652 err = tg3_test_msi(tp);
7655 tg3_full_lock(tp, 0);
7657 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7658 pci_disable_msi(tp->pdev);
7659 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7661 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7663 tg3_free_consistent(tp);
7665 tg3_full_unlock(tp);
7667 napi_disable(&tp->napi);
7672 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7673 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
7674 u32 val = tr32(PCIE_TRANSACTION_CFG);
7676 tw32(PCIE_TRANSACTION_CFG,
7677 val | PCIE_TRANS_CFG_1SHOT_MSI);
/* Success path: arm the timer, mark init complete, go live. */
7682 tg3_full_lock(tp, 0);
7684 add_timer(&tp->timer);
7685 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
7686 tg3_enable_ints(tp);
7688 tg3_full_unlock(tp);
7690 netif_start_queue(dev);
/* Debug-only helper: dump PCI status, the state of every MAC/DMA/ring
 * control block, host-coalescing block addresses, NIC SRAM ring control
 * blocks and NIC-side TX/RX buffer descriptors to the kernel log.
 * Reads registers with tr32()/tr32_mailbox(), SRAM with tg3_read_mem(),
 * and descriptor memory through the register window with readl().
 * Not wired into normal driver paths (note the commented-out "static").
 * NOTE(review): this listing is a sampled excerpt — some intervening
 * lines (braces, a few printk arguments, locals such as val16/i/txd/rxd
 * declarations) are not visible here.
 */
7696 /*static*/ void tg3_dump_state(struct tg3 *tp)
7698 u32 val32, val32_2, val32_3, val32_4, val32_5;
7702 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
7703 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
7704 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
7708 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
7709 tr32(MAC_MODE), tr32(MAC_STATUS));
7710 printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
7711 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
7712 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
7713 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
7714 printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
7715 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
7717 /* Send data initiator control block */
7718 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
7719 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
7720 printk("       SNDDATAI_STATSCTRL[%08x]\n",
7721 tr32(SNDDATAI_STATSCTRL));
7723 /* Send data completion control block */
7724 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
7726 /* Send BD ring selector block */
7727 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
7728 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
7730 /* Send BD initiator control block */
7731 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
7732 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
7734 /* Send BD completion control block */
7735 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
7737 /* Receive list placement control block */
7738 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
7739 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
7740 printk("       RCVLPC_STATSCTRL[%08x]\n",
7741 tr32(RCVLPC_STATSCTRL));
7743 /* Receive data and receive BD initiator control block */
7744 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7745 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
7747 /* Receive data completion control block */
7748 printk("DEBUG: RCVDCC_MODE[%08x]\n",
7751 /* Receive BD initiator control block */
7752 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7753 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
7755 /* Receive BD completion control block */
7756 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7757 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
7759 /* Receive list selector control block */
7760 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7761 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
7763 /* Mbuf cluster free block */
7764 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7765 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
7767 /* Host coalescing control block */
7768 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7769 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
/* 64-bit DMA addresses are exposed as separate high/low 32-bit registers */
7770 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7771 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7772 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7773 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7774 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7775 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7776 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7777 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
7778 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7779 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
7781 /* Memory arbiter control block */
7782 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7783 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
7785 /* Buffer manager control block */
7786 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7787 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
7788 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7789 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7790 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7791 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7792 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7793 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7795 /* Read DMA control block */
7796 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7797 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7799 /* Write DMA control block */
7800 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7801 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7803 /* DMA completion block */
7804 printk("DEBUG: DMAC_MODE[%08x]\n",
7808 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7809 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7810 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7811 tr32(GRC_LOCAL_CTRL));
/* Ring control blocks: 4 consecutive 32-bit words each (host addr hi/lo,
 * length/flags, NIC addr) */
7814 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7815 tr32(RCVDBDI_JUMBO_BD + 0x0),
7816 tr32(RCVDBDI_JUMBO_BD + 0x4),
7817 tr32(RCVDBDI_JUMBO_BD + 0x8),
7818 tr32(RCVDBDI_JUMBO_BD + 0xc));
7819 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7820 tr32(RCVDBDI_STD_BD + 0x0),
7821 tr32(RCVDBDI_STD_BD + 0x4),
7822 tr32(RCVDBDI_STD_BD + 0x8),
7823 tr32(RCVDBDI_STD_BD + 0xc));
7824 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7825 tr32(RCVDBDI_MINI_BD + 0x0),
7826 tr32(RCVDBDI_MINI_BD + 0x4),
7827 tr32(RCVDBDI_MINI_BD + 0x8),
7828 tr32(RCVDBDI_MINI_BD + 0xc));
7830 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7831 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7832 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7833 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7834 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7835 val32, val32_2, val32_3, val32_4);
7837 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7838 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7839 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7840 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7841 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7842 val32, val32_2, val32_3, val32_4);
7844 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7845 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7846 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7847 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7848 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7849 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7850 val32, val32_2, val32_3, val32_4, val32_5);
7852 /* SW status block */
7853 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7854 tp->hw_status->status,
7855 tp->hw_status->status_tag,
7856 tp->hw_status->rx_jumbo_consumer,
7857 tp->hw_status->rx_consumer,
7858 tp->hw_status->rx_mini_consumer,
7859 tp->hw_status->idx[0].rx_producer,
7860 tp->hw_status->idx[0].tx_consumer);
7862 /* SW statistics block */
7863 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7864 ((u32 *)tp->hw_stats)[0],
7865 ((u32 *)tp->hw_stats)[1],
7866 ((u32 *)tp->hw_stats)[2],
7867 ((u32 *)tp->hw_stats)[3]);
7870 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7871 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7872 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7873 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7874 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7876 /* NIC side send descriptors. */
7877 for (i = 0; i < 6; i++) {
/* descriptors live in NIC SRAM, accessed through the mapped register
 * window at tp->regs + NIC_SRAM_WIN_BASE */
7880 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7881 + (i * sizeof(struct tg3_tx_buffer_desc));
7882 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7884 readl(txd + 0x0), readl(txd + 0x4),
7885 readl(txd + 0x8), readl(txd + 0xc));
7888 /* NIC side RX descriptors. */
7889 for (i = 0; i < 6; i++) {
7892 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7893 + (i * sizeof(struct tg3_rx_buffer_desc));
7894 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7896 readl(rxd + 0x0), readl(rxd + 0x4),
7897 readl(rxd + 0x8), readl(rxd + 0xc));
7898 rxd += (4 * sizeof(u32));
7899 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7901 readl(rxd + 0x0), readl(rxd + 0x4),
7902 readl(rxd + 0x8), readl(rxd + 0xc));
7905 for (i = 0; i < 6; i++) {
7908 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7909 + (i * sizeof(struct tg3_rx_buffer_desc));
7910 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7912 readl(rxd + 0x0), readl(rxd + 0x4),
7913 readl(rxd + 0x8), readl(rxd + 0xc));
7914 rxd += (4 * sizeof(u32));
7915 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7917 readl(rxd + 0x0), readl(rxd + 0x4),
7918 readl(rxd + 0x8), readl(rxd + 0xc));
7923 static struct net_device_stats *tg3_get_stats(struct net_device *);
7924 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
/* net_device close handler: quiesce NAPI and any pending reset work,
 * stop the TX queue and driver timer, halt the chip under the full lock
 * (irq_sync=1), release the IRQ (disabling MSI if it was in use),
 * snapshot final hardware statistics into tp->net_stats_prev /
 * tp->estats_prev, free the DMA-consistent rings and drop the device
 * to D3hot.  NOTE(review): sampled excerpt — braces and the trailing
 * "return 0;" are not visible here.
 */
7926 static int tg3_close(struct net_device *dev)
7928 struct tg3 *tp = netdev_priv(dev);
7930 napi_disable(&tp->napi);
7931 cancel_work_sync(&tp->reset_task);
7933 netif_stop_queue(dev);
7935 del_timer_sync(&tp->timer);
7937 tg3_full_lock(tp, 1);
7942 tg3_disable_ints(tp);
7944 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7946 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
7948 tg3_full_unlock(tp);
7950 free_irq(tp->pdev->irq, dev);
7951 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7952 pci_disable_msi(tp->pdev);
7953 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
/* Preserve final counters: after tg3_free_consistent() the hw stats
 * block is gone, so later reads start from these *_prev snapshots. */
7956 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
7957 sizeof(tp->net_stats_prev));
7958 memcpy(&tp->estats_prev, tg3_get_estats(tp),
7959 sizeof(tp->estats_prev));
7961 tg3_free_consistent(tp);
7963 tg3_set_power_state(tp, PCI_D3hot);
7965 netif_carrier_off(tp->dev);
/* Collapse a 64-bit hardware statistics counter into an unsigned long.
 * On 32-bit kernels the high and low words are combined explicitly;
 * NOTE(review): the 64-bit branch is outside this sampled excerpt.
 */
7970 static inline unsigned long get_stat64(tg3_stat64_t *val)
7974 #if (BITS_PER_LONG == 32)
7977 ret = ((u64)val->high << 32) | ((u64)val->low);
/* Return the cumulative RX CRC (FCS) error count.  On 5700/5701 copper
 * parts the count must be read from the PHY (MII_TG3_TEST1 CRC counter,
 * read back via register 0x14) and accumulated in tp->phy_crc_errors
 * under tp->lock; all other chips report it in the hardware statistics
 * block (rx_fcs_errors).
 */
7982 static unsigned long calc_crc_errors(struct tg3 *tp)
7984 struct tg3_hw_stats *hw_stats = tp->hw_stats;
7986 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7987 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7988 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
7991 spin_lock_bh(&tp->lock);
7992 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
/* enable the PHY CRC counter, then read it from reg 0x14 */
7993 tg3_writephy(tp, MII_TG3_TEST1,
7994 val | MII_TG3_TEST1_CRC_EN);
7995 tg3_readphy(tp, 0x14, &val);
7998 spin_unlock_bh(&tp->lock);
8000 tp->phy_crc_errors += val;
8002 return tp->phy_crc_errors;
8005 return get_stat64(&hw_stats->rx_fcs_errors);
8008 #define ESTAT_ADD(member) \
8009 estats->member = old_estats->member + \
8010 get_stat64(&hw_stats->member)
/* Build the ethtool statistics block in tp->estats: each counter is the
 * snapshot taken at the last close (tp->estats_prev) plus the current
 * hardware statistics block value, folded in by ESTAT_ADD().
 * Returns a pointer to tp->estats.
 */
8012 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
8014 struct tg3_ethtool_stats *estats = &tp->estats;
8015 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
8016 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8021 ESTAT_ADD(rx_octets);
8022 ESTAT_ADD(rx_fragments);
8023 ESTAT_ADD(rx_ucast_packets);
8024 ESTAT_ADD(rx_mcast_packets);
8025 ESTAT_ADD(rx_bcast_packets);
8026 ESTAT_ADD(rx_fcs_errors);
8027 ESTAT_ADD(rx_align_errors);
8028 ESTAT_ADD(rx_xon_pause_rcvd);
8029 ESTAT_ADD(rx_xoff_pause_rcvd);
8030 ESTAT_ADD(rx_mac_ctrl_rcvd);
8031 ESTAT_ADD(rx_xoff_entered);
8032 ESTAT_ADD(rx_frame_too_long_errors);
8033 ESTAT_ADD(rx_jabbers);
8034 ESTAT_ADD(rx_undersize_packets);
8035 ESTAT_ADD(rx_in_length_errors);
8036 ESTAT_ADD(rx_out_length_errors);
8037 ESTAT_ADD(rx_64_or_less_octet_packets);
8038 ESTAT_ADD(rx_65_to_127_octet_packets);
8039 ESTAT_ADD(rx_128_to_255_octet_packets);
8040 ESTAT_ADD(rx_256_to_511_octet_packets);
8041 ESTAT_ADD(rx_512_to_1023_octet_packets);
8042 ESTAT_ADD(rx_1024_to_1522_octet_packets);
8043 ESTAT_ADD(rx_1523_to_2047_octet_packets);
8044 ESTAT_ADD(rx_2048_to_4095_octet_packets);
8045 ESTAT_ADD(rx_4096_to_8191_octet_packets);
8046 ESTAT_ADD(rx_8192_to_9022_octet_packets);
8048 ESTAT_ADD(tx_octets);
8049 ESTAT_ADD(tx_collisions);
8050 ESTAT_ADD(tx_xon_sent);
8051 ESTAT_ADD(tx_xoff_sent);
8052 ESTAT_ADD(tx_flow_control);
8053 ESTAT_ADD(tx_mac_errors);
8054 ESTAT_ADD(tx_single_collisions);
8055 ESTAT_ADD(tx_mult_collisions);
8056 ESTAT_ADD(tx_deferred);
8057 ESTAT_ADD(tx_excessive_collisions);
8058 ESTAT_ADD(tx_late_collisions);
8059 ESTAT_ADD(tx_collide_2times);
8060 ESTAT_ADD(tx_collide_3times);
8061 ESTAT_ADD(tx_collide_4times);
8062 ESTAT_ADD(tx_collide_5times);
8063 ESTAT_ADD(tx_collide_6times);
8064 ESTAT_ADD(tx_collide_7times);
8065 ESTAT_ADD(tx_collide_8times);
8066 ESTAT_ADD(tx_collide_9times);
8067 ESTAT_ADD(tx_collide_10times);
8068 ESTAT_ADD(tx_collide_11times);
8069 ESTAT_ADD(tx_collide_12times);
8070 ESTAT_ADD(tx_collide_13times);
8071 ESTAT_ADD(tx_collide_14times);
8072 ESTAT_ADD(tx_collide_15times);
8073 ESTAT_ADD(tx_ucast_packets);
8074 ESTAT_ADD(tx_mcast_packets);
8075 ESTAT_ADD(tx_bcast_packets);
8076 ESTAT_ADD(tx_carrier_sense_errors);
8077 ESTAT_ADD(tx_discards);
8078 ESTAT_ADD(tx_errors);
8080 ESTAT_ADD(dma_writeq_full);
8081 ESTAT_ADD(dma_write_prioq_full);
8082 ESTAT_ADD(rxbds_empty);
8083 ESTAT_ADD(rx_discards);
8084 ESTAT_ADD(rx_errors);
8085 ESTAT_ADD(rx_threshold_hit);
8087 ESTAT_ADD(dma_readq_full);
8088 ESTAT_ADD(dma_read_prioq_full);
8089 ESTAT_ADD(tx_comp_queue_full);
8091 ESTAT_ADD(ring_set_send_prod_index);
8092 ESTAT_ADD(ring_status_update);
8093 ESTAT_ADD(nic_irqs);
8094 ESTAT_ADD(nic_avoided_irqs);
8095 ESTAT_ADD(nic_tx_threshold_hit);
8100 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
8102 struct tg3 *tp = netdev_priv(dev);
8103 struct net_device_stats *stats = &tp->net_stats;
8104 struct net_device_stats *old_stats = &tp->net_stats_prev;
8105 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8110 stats->rx_packets = old_stats->rx_packets +
8111 get_stat64(&hw_stats->rx_ucast_packets) +
8112 get_stat64(&hw_stats->rx_mcast_packets) +
8113 get_stat64(&hw_stats->rx_bcast_packets);
8115 stats->tx_packets = old_stats->tx_packets +
8116 get_stat64(&hw_stats->tx_ucast_packets) +
8117 get_stat64(&hw_stats->tx_mcast_packets) +
8118 get_stat64(&hw_stats->tx_bcast_packets);
8120 stats->rx_bytes = old_stats->rx_bytes +
8121 get_stat64(&hw_stats->rx_octets);
8122 stats->tx_bytes = old_stats->tx_bytes +
8123 get_stat64(&hw_stats->tx_octets);
8125 stats->rx_errors = old_stats->rx_errors +
8126 get_stat64(&hw_stats->rx_errors);
8127 stats->tx_errors = old_stats->tx_errors +
8128 get_stat64(&hw_stats->tx_errors) +
8129 get_stat64(&hw_stats->tx_mac_errors) +
8130 get_stat64(&hw_stats->tx_carrier_sense_errors) +
8131 get_stat64(&hw_stats->tx_discards);
8133 stats->multicast = old_stats->multicast +
8134 get_stat64(&hw_stats->rx_mcast_packets);
8135 stats->collisions = old_stats->collisions +
8136 get_stat64(&hw_stats->tx_collisions);
8138 stats->rx_length_errors = old_stats->rx_length_errors +
8139 get_stat64(&hw_stats->rx_frame_too_long_errors) +
8140 get_stat64(&hw_stats->rx_undersize_packets);
8142 stats->rx_over_errors = old_stats->rx_over_errors +
8143 get_stat64(&hw_stats->rxbds_empty);
8144 stats->rx_frame_errors = old_stats->rx_frame_errors +
8145 get_stat64(&hw_stats->rx_align_errors);
8146 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
8147 get_stat64(&hw_stats->tx_discards);
8148 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
8149 get_stat64(&hw_stats->tx_carrier_sense_errors);
8151 stats->rx_crc_errors = old_stats->rx_crc_errors +
8152 calc_crc_errors(tp);
8154 stats->rx_missed_errors = old_stats->rx_missed_errors +
8155 get_stat64(&hw_stats->rx_discards);
/* Bitwise CRC-32 over buf[0..len), iterating each byte (j) and each of
 * its 8 bits (k).  Used for the multicast hash filter and NVRAM
 * checksums.  NOTE(review): the shift/XOR core of the loop is outside
 * this sampled excerpt — do not infer the exact polynomial from here.
 */
8160 static inline u32 calc_crc(unsigned char *buf, int len)
8168 for (j = 0; j < len; j++) {
8171 for (k = 0; k < 8; k++) {
/* Program the four 32-bit MAC multicast hash registers to either accept
 * every multicast frame (all bits set) or reject all (all bits clear).
 */
8185 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8187 /* accept or reject all multicast frames */
8188 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8189 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8190 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8191 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
/* Compute and program the MAC RX mode: promiscuous / all-multicast /
 * per-address multicast hash filtering, and VLAN tag stripping policy.
 * Caller must hold the appropriate tg3 locks (called from
 * tg3_set_rx_mode() under tg3_full_lock).  Only writes MAC_RX_MODE when
 * the computed value actually changed.
 */
8194 static void __tg3_set_rx_mode(struct net_device *dev)
8196 struct tg3 *tp = netdev_priv(dev);
8199 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8200 RX_MODE_KEEP_VLAN_TAG);
8202 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8205 #if TG3_VLAN_TAG_USED
8207 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8208 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8210 /* By definition, VLAN is disabled always in this
8213 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8214 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8217 if (dev->flags & IFF_PROMISC) {
8218 /* Promiscuous mode. */
8219 rx_mode |= RX_MODE_PROMISC;
8220 } else if (dev->flags & IFF_ALLMULTI) {
8221 /* Accept all multicast. */
8222 tg3_set_multi (tp, 1);
8223 } else if (dev->mc_count < 1) {
8224 /* Reject all multicast. */
8225 tg3_set_multi (tp, 0);
8227 /* Accept one or more multicast(s). */
8228 struct dev_mc_list *mclist;
8230 u32 mc_filter[4] = { 0, };
/* Hash each multicast address with calc_crc(); bits 5-6 of the hash
 * select one of the four filter registers, the low bits the bit within
 * it.  NOTE(review): the bit-extraction line itself is not visible in
 * this sampled excerpt. */
8235 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8236 i++, mclist = mclist->next) {
8238 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
8240 regidx = (bit & 0x60) >> 5;
8242 mc_filter[regidx] |= (1 << bit);
8245 tw32(MAC_HASH_REG_0, mc_filter[0]);
8246 tw32(MAC_HASH_REG_1, mc_filter[1]);
8247 tw32(MAC_HASH_REG_2, mc_filter[2]);
8248 tw32(MAC_HASH_REG_3, mc_filter[3]);
8251 if (rx_mode != tp->rx_mode) {
8252 tp->rx_mode = rx_mode;
8253 tw32_f(MAC_RX_MODE, rx_mode);
/* net_device set_rx_mode handler: take the full lock and delegate to
 * __tg3_set_rx_mode().  No-op if the interface is down.
 */
8258 static void tg3_set_rx_mode(struct net_device *dev)
8260 struct tg3 *tp = netdev_priv(dev);
8262 if (!netif_running(dev))
8265 tg3_full_lock(tp, 0);
8266 __tg3_set_rx_mode(dev);
8267 tg3_full_unlock(tp);
8270 #define TG3_REGDUMP_LEN (32 * 1024)
/* ethtool get_regs_len: the register dump is always TG3_REGDUMP_LEN
 * (32 KiB) regardless of chip variant. */
8272 static int tg3_get_regs_len(struct net_device *dev)
8274 return TG3_REGDUMP_LEN;
/* ethtool get_regs handler: zero the 32 KiB buffer, then copy selected
 * register ranges into it at their native offsets under the full lock.
 * GET_REG32_LOOP(base, len) copies a contiguous range; GET_REG32_1(reg)
 * copies a single register.  Skipped (left zero) regions are ones that
 * are unsafe or meaningless to read.  Bails out early when the PHY is
 * in low-power state.
 */
8277 static void tg3_get_regs(struct net_device *dev,
8278 struct ethtool_regs *regs, void *_p)
8281 struct tg3 *tp = netdev_priv(dev);
8287 memset(p, 0, TG3_REGDUMP_LEN);
8289 if (tp->link_config.phy_is_low_power)
8292 tg3_full_lock(tp, 0);
8294 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
8295 #define GET_REG32_LOOP(base,len) \
8296 do { p = (u32 *)(orig_p + (base)); \
8297 for (i = 0; i < len; i += 4) \
8298 __GET_REG32((base) + i); \
8300 #define GET_REG32_1(reg) \
8301 do { p = (u32 *)(orig_p + (reg)); \
8302 __GET_REG32((reg)); \
8305 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
8306 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
8307 GET_REG32_LOOP(MAC_MODE, 0x4f0);
8308 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
8309 GET_REG32_1(SNDDATAC_MODE);
8310 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
8311 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
8312 GET_REG32_1(SNDBDC_MODE);
8313 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
8314 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
8315 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
8316 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
8317 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
8318 GET_REG32_1(RCVDCC_MODE);
8319 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
8320 GET_REG32_LOOP(RCVCC_MODE, 0x14);
8321 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
8322 GET_REG32_1(MBFREE_MODE);
8323 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
8324 GET_REG32_LOOP(MEMARB_MODE, 0x10);
8325 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
8326 GET_REG32_LOOP(RDMAC_MODE, 0x08);
8327 GET_REG32_LOOP(WDMAC_MODE, 0x08);
8328 GET_REG32_1(RX_CPU_MODE);
8329 GET_REG32_1(RX_CPU_STATE);
8330 GET_REG32_1(RX_CPU_PGMCTR);
8331 GET_REG32_1(RX_CPU_HWBKPT);
8332 GET_REG32_1(TX_CPU_MODE);
8333 GET_REG32_1(TX_CPU_STATE);
8334 GET_REG32_1(TX_CPU_PGMCTR);
8335 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
8336 GET_REG32_LOOP(FTQ_RESET, 0x120);
8337 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
8338 GET_REG32_1(DMAC_MODE);
8339 GET_REG32_LOOP(GRC_MODE, 0x4c);
/* NVRAM registers are only present/readable when the chip has NVRAM */
8340 if (tp->tg3_flags & TG3_FLAG_NVRAM)
8341 GET_REG32_LOOP(NVRAM_CMD, 0x24);
8344 #undef GET_REG32_LOOP
8347 tg3_full_unlock(tp);
/* ethtool get_eeprom_len: report the NVRAM size probed at init time. */
8350 static int tg3_get_eeprom_len(struct net_device *dev)
8352 struct tg3 *tp = netdev_priv(dev);
8354 return tp->nvram_size;
8357 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8358 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
8359 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
/* ethtool get_eeprom handler: read an arbitrary byte range from NVRAM.
 * NVRAM is word-addressed, so the read is done in three phases:
 * a leading partial word to reach 4-byte alignment, whole aligned
 * words, and a trailing partial word.  eeprom->len is advanced as each
 * phase completes so a partial result is reported on error.  Refuses
 * to touch NVRAM while the PHY is in low-power state.
 */
8361 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8363 struct tg3 *tp = netdev_priv(dev);
8366 u32 i, offset, len, b_offset, b_count;
8369 if (tp->link_config.phy_is_low_power)
8372 offset = eeprom->offset;
8376 eeprom->magic = TG3_EEPROM_MAGIC;
8379 /* adjustments to start on required 4 byte boundary */
8380 b_offset = offset & 3;
8381 b_count = 4 - b_offset;
8382 if (b_count > len) {
8383 /* i.e. offset=1 len=2 */
8386 ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
8389 memcpy(data, ((char*)&val) + b_offset, b_count);
8392 eeprom->len += b_count;
8395 /* read bytes upto the last 4 byte boundary */
8396 pd = &data[eeprom->len];
8397 for (i = 0; i < (len - (len & 3)); i += 4) {
8398 ret = tg3_nvram_read_le(tp, offset + i, &val);
8403 memcpy(pd + i, &val, 4);
8408 /* read last bytes not ending on 4 byte boundary */
8409 pd = &data[eeprom->len];
8411 b_offset = offset + len - b_count;
8412 ret = tg3_nvram_read_le(tp, b_offset, &val);
8415 memcpy(pd, &val, b_count);
8416 eeprom->len += b_count;
8421 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
/* ethtool set_eeprom handler: write a byte range to NVRAM.  Because
 * NVRAM writes are word-granular, unaligned head/tail bytes are handled
 * by reading the surrounding words first (read-modify-write): `start`
 * holds the word before an unaligned offset, `end` the word after an
 * unaligned length, and a temporary buffer is assembled before
 * tg3_nvram_write_block() does the actual write.  Rejects requests
 * while the PHY is in low power or when eeprom->magic is wrong.
 */
8423 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8425 struct tg3 *tp = netdev_priv(dev);
8427 u32 offset, len, b_offset, odd_len;
8431 if (tp->link_config.phy_is_low_power)
8434 if (eeprom->magic != TG3_EEPROM_MAGIC)
8437 offset = eeprom->offset;
8440 if ((b_offset = (offset & 3))) {
8441 /* adjustments to start on required 4 byte boundary */
8442 ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
8453 /* adjustments to end on required 4 byte boundary */
8455 len = (len + 3) & ~3;
8456 ret = tg3_nvram_read_le(tp, offset+len-4, &end);
8462 if (b_offset || odd_len) {
8463 buf = kmalloc(len, GFP_KERNEL);
8467 memcpy(buf, &start, 4);
8469 memcpy(buf+len-4, &end, 4);
8470 memcpy(buf + b_offset, data, eeprom->len);
8473 ret = tg3_nvram_write_block(tp, offset, len, buf);
/* ethtool get_settings handler: report supported/advertised link modes
 * (gigabit only if not a 10/100-only part; copper modes vs. fibre
 * depending on SERDES), plus current speed/duplex when the interface
 * is running, PHY address and autoneg state.
 */
8481 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8483 struct tg3 *tp = netdev_priv(dev);
8485 cmd->supported = (SUPPORTED_Autoneg);
8487 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8488 cmd->supported |= (SUPPORTED_1000baseT_Half |
8489 SUPPORTED_1000baseT_Full);
8491 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
8492 cmd->supported |= (SUPPORTED_100baseT_Half |
8493 SUPPORTED_100baseT_Full |
8494 SUPPORTED_10baseT_Half |
8495 SUPPORTED_10baseT_Full |
8497 cmd->port = PORT_TP;
8499 cmd->supported |= SUPPORTED_FIBRE;
8500 cmd->port = PORT_FIBRE;
8503 cmd->advertising = tp->link_config.advertising;
8504 if (netif_running(dev)) {
8505 cmd->speed = tp->link_config.active_speed;
8506 cmd->duplex = tp->link_config.active_duplex;
8508 cmd->phy_address = PHY_ADDR;
8509 cmd->transceiver = 0;
8510 cmd->autoneg = tp->link_config.autoneg;
/* ethtool set_settings handler: validate the request against hardware
 * limits (fibre/SERDES can only advertise or force 1000 Mb/s; copper
 * cannot force 1000; 10/100-only parts cannot do 1000 at all), then
 * store the new autoneg/speed/duplex configuration under the full lock
 * and restart the PHY if the interface is up.  The orig_* fields keep a
 * copy for restore after power transitions.
 */
8516 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8518 struct tg3 *tp = netdev_priv(dev);
8520 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
8521 /* These are the only valid advertisement bits allowed. */
8522 if (cmd->autoneg == AUTONEG_ENABLE &&
8523 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
8524 ADVERTISED_1000baseT_Full |
8525 ADVERTISED_Autoneg |
8528 /* Fiber can only do SPEED_1000. */
8529 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8530 (cmd->speed != SPEED_1000))
8532 /* Copper cannot force SPEED_1000. */
8533 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8534 (cmd->speed == SPEED_1000))
8536 else if ((cmd->speed == SPEED_1000) &&
8537 (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
8540 tg3_full_lock(tp, 0);
8542 tp->link_config.autoneg = cmd->autoneg;
8543 if (cmd->autoneg == AUTONEG_ENABLE) {
8544 tp->link_config.advertising = (cmd->advertising |
8545 ADVERTISED_Autoneg);
8546 tp->link_config.speed = SPEED_INVALID;
8547 tp->link_config.duplex = DUPLEX_INVALID;
8549 tp->link_config.advertising = 0;
8550 tp->link_config.speed = cmd->speed;
8551 tp->link_config.duplex = cmd->duplex;
8554 tp->link_config.orig_speed = tp->link_config.speed;
8555 tp->link_config.orig_duplex = tp->link_config.duplex;
8556 tp->link_config.orig_autoneg = tp->link_config.autoneg;
8558 if (netif_running(dev))
8559 tg3_setup_phy(tp, 1);
8561 tg3_full_unlock(tp);
/* ethtool get_drvinfo: driver name/version, firmware version string
 * read at probe time, and the PCI bus address. */
8566 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8568 struct tg3 *tp = netdev_priv(dev);
8570 strcpy(info->driver, DRV_MODULE_NAME);
8571 strcpy(info->version, DRV_MODULE_VERSION);
8572 strcpy(info->fw_version, tp->fw_ver);
8573 strcpy(info->bus_info, pci_name(tp->pdev));
/* ethtool get_wol: magic-packet wake-up is the only WoL mode this
 * hardware supports, and only when TG3_FLAG_WOL_CAP is set. */
8576 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8578 struct tg3 *tp = netdev_priv(dev);
8580 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
8581 wol->supported = WAKE_MAGIC;
8585 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8586 wol->wolopts = WAKE_MAGIC;
8587 memset(&wol->sopass, 0, sizeof(wol->sopass));
/* ethtool set_wol: accept only WAKE_MAGIC (and only if the chip is
 * WoL-capable), then toggle TG3_FLAG_WOL_ENABLE under tp->lock. */
8590 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8592 struct tg3 *tp = netdev_priv(dev);
8594 if (wol->wolopts & ~WAKE_MAGIC)
8596 if ((wol->wolopts & WAKE_MAGIC) &&
8597 !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
8600 spin_lock_bh(&tp->lock);
8601 if (wol->wolopts & WAKE_MAGIC)
8602 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8604 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8605 spin_unlock_bh(&tp->lock);
/* ethtool get_msglevel: return the netif message-enable bitmask. */
8610 static u32 tg3_get_msglevel(struct net_device *dev)
8612 struct tg3 *tp = netdev_priv(dev);
8613 return tp->msg_enable;
/* ethtool set_msglevel: store the netif message-enable bitmask. */
8616 static void tg3_set_msglevel(struct net_device *dev, u32 value)
8618 struct tg3 *tp = netdev_priv(dev);
8619 tp->msg_enable = value;
/* ethtool set_tso: reject enabling TSO on non-capable chips; on
 * HW_TSO_2 chips (other than 5906) also toggle TSO6, and TSO_ECN on
 * 5761, in step with the requested state; finally delegate the basic
 * NETIF_F_TSO flag to ethtool_op_set_tso().
 */
8622 static int tg3_set_tso(struct net_device *dev, u32 value)
8624 struct tg3 *tp = netdev_priv(dev);
8626 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8631 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
8632 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
8634 dev->features |= NETIF_F_TSO6;
8635 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8636 dev->features |= NETIF_F_TSO_ECN;
8638 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
8640 return ethtool_op_set_tso(dev, value);
/* ethtool nway_reset: restart copper autonegotiation by setting
 * BMCR_ANRESTART in the PHY's BMCR under tp->lock.  Refused when the
 * interface is down or on SERDES parts.  BMCR is read twice — the
 * first read clears latched state; only the second read's value is
 * checked.
 */
8643 static int tg3_nway_reset(struct net_device *dev)
8645 struct tg3 *tp = netdev_priv(dev);
8649 if (!netif_running(dev))
8652 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8655 spin_lock_bh(&tp->lock);
8657 tg3_readphy(tp, MII_BMCR, &bmcr);
8658 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
8659 ((bmcr & BMCR_ANENABLE) ||
8660 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
8661 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
8665 spin_unlock_bh(&tp->lock);
/* ethtool get_ringparam: report ring size limits and current settings.
 * Jumbo ring values are only meaningful when the jumbo ring is enabled;
 * the mini ring is never exposed.
 */
8670 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8672 struct tg3 *tp = netdev_priv(dev);
8674 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8675 ering->rx_mini_max_pending = 0;
8676 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8677 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8679 ering->rx_jumbo_max_pending = 0;
8681 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8683 ering->rx_pending = tp->rx_pending;
8684 ering->rx_mini_pending = 0;
8685 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8686 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8688 ering->rx_jumbo_pending = 0;
8690 ering->tx_pending = tp->tx_pending;
/* ethtool set_ringparam: validate the requested ring sizes (TX must
 * exceed MAX_SKB_FRAGS — tripled on TSO_BUG chips — and all rings must
 * fit hardware limits), then apply them under the full lock.  If the
 * interface is up the chip is halted and reinitialized so the new ring
 * sizes take effect.  MAX_RXPEND_64 chips are clamped to 63 RX entries.
 */
8693 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8695 struct tg3 *tp = netdev_priv(dev);
8696 int irq_sync = 0, err = 0;
8698 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
8699 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
8700 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
8701 (ering->tx_pending <= MAX_SKB_FRAGS) ||
8702 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
8703 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
8706 if (netif_running(dev)) {
8711 tg3_full_lock(tp, irq_sync);
8713 tp->rx_pending = ering->rx_pending;
8715 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
8716 tp->rx_pending > 63)
8717 tp->rx_pending = 63;
8718 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
8719 tp->tx_pending = ering->tx_pending;
8721 if (netif_running(dev)) {
8722 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8723 err = tg3_restart_hw(tp, 1);
8725 tg3_netif_start(tp);
8728 tg3_full_unlock(tp);
/* ethtool get_pauseparam: report flow-control autoneg and the currently
 * active RX/TX pause state from link_config.active_flowctrl. */
8733 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8735 struct tg3 *tp = netdev_priv(dev);
8737 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8739 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
8740 epause->rx_pause = 1;
8742 epause->rx_pause = 0;
8744 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
8745 epause->tx_pause = 1;
8747 epause->tx_pause = 0;
/* ethtool set_pauseparam: update pause-autoneg and the requested RX/TX
 * flow-control flags under the full lock, then halt and restart the
 * hardware if the interface is up so the new settings take effect.
 */
8750 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8752 struct tg3 *tp = netdev_priv(dev);
8753 int irq_sync = 0, err = 0;
8755 if (netif_running(dev)) {
8760 tg3_full_lock(tp, irq_sync);
8762 if (epause->autoneg)
8763 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8765 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8766 if (epause->rx_pause)
8767 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
8769 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
8770 if (epause->tx_pause)
8771 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
8773 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
8775 if (netif_running(dev)) {
8776 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8777 err = tg3_restart_hw(tp, 1);
8779 tg3_netif_start(tp);
8782 tg3_full_unlock(tp);
/* ethtool get_rx_csum: report whether RX checksum offload is on. */
8787 static u32 tg3_get_rx_csum(struct net_device *dev)
8789 struct tg3 *tp = netdev_priv(dev);
8790 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
/* ethtool set_rx_csum: toggle TG3_FLAG_RX_CHECKSUMS under tp->lock;
 * chips with broken checksum hardware cannot have it enabled. */
8793 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8795 struct tg3 *tp = netdev_priv(dev);
8797 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8803 spin_lock_bh(&tp->lock);
8805 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8807 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8808 spin_unlock_bh(&tp->lock);
/* ethtool set_tx_csum: on checksum-broken chips refuse to enable; on
 * 5755/5787/5784/5761 use the IPv6-capable helper, otherwise the plain
 * IPv4 one. */
8813 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8815 struct tg3 *tp = netdev_priv(dev);
8817 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8823 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8824 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
8825 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8826 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8827 ethtool_op_set_tx_ipv6_csum(dev, data);
8829 ethtool_op_set_tx_csum(dev, data);
/* ethtool get_sset_count: number of self-test entries or statistics
 * strings, depending on the requested string set. */
8834 static int tg3_get_sset_count (struct net_device *dev, int sset)
8838 return TG3_NUM_TEST;
8840 return TG3_NUM_STATS;
/* ethtool get_strings: copy the statistics or self-test key-string
 * tables into buf for the requested string set.  Any other string set
 * is a caller bug and is flagged with WARN_ON.
 * Fix: the identifiers on the memcpy source arguments were corrupted by
 * an encoding error — "&e" had been mangled into the single byte 'ð'
 * (mojibake of the HTML entity "&eth;").  Restored to
 * &ethtool_stats_keys / &ethtool_test_keys, matching the intact
 * sizeof() operands on the same lines.
 */
8846 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8848 switch (stringset) {
8850 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8853 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8856 WARN_ON(1); /* we need a WARN() */
/* ethtool phys_id handler: blink the port LEDs to identify the adapter.
 * Alternates between an all-on override pattern and link-override-only
 * (off) every 500 ms for `data` seconds (0 means effectively forever —
 * UINT_MAX/2 iterations), aborting early if the sleep is interrupted,
 * then restores the saved LED control value.  Requires the interface
 * to be up.
 */
8861 static int tg3_phys_id(struct net_device *dev, u32 data)
8863 struct tg3 *tp = netdev_priv(dev);
8866 if (!netif_running(tp->dev))
8870 data = UINT_MAX / 2;
8872 for (i = 0; i < (data * 2); i++) {
8874 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8875 LED_CTRL_1000MBPS_ON |
8876 LED_CTRL_100MBPS_ON |
8877 LED_CTRL_10MBPS_ON |
8878 LED_CTRL_TRAFFIC_OVERRIDE |
8879 LED_CTRL_TRAFFIC_BLINK |
8880 LED_CTRL_TRAFFIC_LED);
8883 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8884 LED_CTRL_TRAFFIC_OVERRIDE);
8886 if (msleep_interruptible(500))
8889 tw32(MAC_LED_CTRL, tp->led_ctrl);
/* ethtool get_ethtool_stats: copy the freshly accumulated estats block
 * into the caller's u64 array. */
8893 static void tg3_get_ethtool_stats (struct net_device *dev,
8894 struct ethtool_stats *estats, u64 *tmp_stats)
8896 struct tg3 *tp = netdev_priv(dev);
8897 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8900 #define NVRAM_TEST_SIZE 0x100
8901 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
8902 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
8903 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
8904 #define NVRAM_SELFBOOT_HW_SIZE 0x20
8905 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
/* Self-test: verify NVRAM contents.  The magic word at offset 0 selects
 * the layout: standard EEPROM (0x100 bytes with CRC-protected bootstrap
 * and manufacturing blocks), self-boot firmware format 1 (size depends
 * on the revision field; verified with a simple byte checksum, skipping
 * the MBA word on rev 2), or self-boot hardware format (0x20 bytes with
 * per-byte odd-parity bits interleaved with the data).  Reads the
 * selected region into a kmalloc'd buffer and returns 0 on success,
 * negative on any mismatch or read failure.
 */
8907 static int tg3_test_nvram(struct tg3 *tp)
8911 int i, j, k, err = 0, size;
8913 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8916 if (magic == TG3_EEPROM_MAGIC)
8917 size = NVRAM_TEST_SIZE;
8918 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
8919 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
8920 TG3_EEPROM_SB_FORMAT_1) {
8921 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
8922 case TG3_EEPROM_SB_REVISION_0:
8923 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
8925 case TG3_EEPROM_SB_REVISION_2:
8926 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
8928 case TG3_EEPROM_SB_REVISION_3:
8929 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
8936 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
8937 size = NVRAM_SELFBOOT_HW_SIZE;
8941 buf = kmalloc(size, GFP_KERNEL);
8946 for (i = 0, j = 0; i < size; i += 4, j++) {
8947 if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
8953 /* Selfboot format */
8954 magic = swab32(le32_to_cpu(buf[0]));
8955 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
8956 TG3_EEPROM_MAGIC_FW) {
8957 u8 *buf8 = (u8 *) buf, csum8 = 0;
8959 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
8960 TG3_EEPROM_SB_REVISION_2) {
8961 /* For rev 2, the csum doesn't include the MBA. */
8962 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
8964 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
8967 for (i = 0; i < size; i++)
8980 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
8981 TG3_EEPROM_MAGIC_HW) {
8982 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
8983 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
8984 u8 *buf8 = (u8 *) buf;
8986 /* Separate the parity bits and the data bytes. */
8987 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
8988 if ((i == 0) || (i == 8)) {
8992 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
8993 parity[k++] = buf8[i] & msk;
9000 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
9001 parity[k++] = buf8[i] & msk;
9004 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
9005 parity[k++] = buf8[i] & msk;
9008 data[j++] = buf8[i];
/* Each data byte must have odd parity with its recorded parity bit */
9012 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
9013 u8 hw8 = hweight8(data[i]);
9015 if ((hw8 & 0x1) && parity[i])
9017 else if (!(hw8 & 0x1) && !parity[i])
9024 /* Bootstrap checksum at offset 0x10 */
9025 csum = calc_crc((unsigned char *) buf, 0x10);
9026 if(csum != le32_to_cpu(buf[0x10/4]))
9029 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
9030 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
9031 if (csum != le32_to_cpu(buf[0xfc/4]))
9041 #define TG3_SERDES_TIMEOUT_SEC 2
9042 #define TG3_COPPER_TIMEOUT_SEC 6
/* Link self-test: poll once per second for carrier, up to a limit that
 * depends on the PHY type (SERDES links use the shorter timeout).
 * NOTE(review): tail of the function is elided in this fragment.
 */
static int tg3_test_link(struct tg3 *tp)
	/* Cannot test link state on an interface that is down. */
	if (!netif_running(tp->dev))
	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		max = TG3_SERDES_TIMEOUT_SEC;
		max = TG3_COPPER_TIMEOUT_SEC;
	for (i = 0; i < max; i++) {
		/* Success as soon as the carrier is reported up. */
		if (netif_carrier_ok(tp->dev))
		/* Abort the wait early if a signal interrupts the sleep. */
		if (msleep_interruptible(1000))
/* Only test the commonly used registers */
/* Register self-test: for each table entry, save the register, write 0
 * and then all-ones through the writable mask, verifying the read-only
 * bits never change and the read/write bits take the written value.
 * Entries are filtered by chip family via the TG3_FL_* flags.
 */
static int tg3_test_registers(struct tg3 *tp)
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	/* Per-entry flags selecting which chip families the entry applies to. */
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },
		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },
		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },
		/* Sentinel entry terminating the table. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	is_5705 = is_5750 = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that do not apply to this chip family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;
		/* Save the original register content */
		save_val = tr32(offset);
		/* Determine the read-only value. */
		read_val = save_val & read_mask;
		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);
		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
		/* Restore the register's original contents. */
		tw32(offset, save_val);
	if (netif_msg_hw(tp))
		printk(KERN_ERR PFX "Register test failed at offset %x\n",
	/* Restore the last register touched before reporting failure. */
	tw32(offset, save_val);
/* Memory self-test helper: write each pattern to every 32-bit word in
 * [offset, offset + len) of on-chip memory and read it back, failing
 * on the first mismatch.
 */
static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
	/* Patterns chosen to toggle all bits: zeros, ones, alternating. */
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
		for (j = 0; j < len; j += 4) {
			tg3_write_mem(tp, offset + j, test_pattern[i]);
			tg3_read_mem(tp, offset + j, &val);
			/* Read-back mismatch means this memory word failed. */
			if (val != test_pattern[i])
/* Memory self-test: run tg3_do_mem_test() over a chip-specific table of
 * {offset, length} regions.  Each table ends with an 0xffffffff sentinel.
 */
static int tg3_test_memory(struct tg3 *tp)
	static struct mem_entry {
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	struct mem_entry *mem_tbl;
	/* Pick the region table matching the ASIC revision. */
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			mem_tbl = mem_tbl_5755;
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			mem_tbl = mem_tbl_5906;
			mem_tbl = mem_tbl_5705;
		mem_tbl = mem_tbl_570x;
	/* Stop at the sentinel; bail out on the first failing region. */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
		    mem_tbl[i].len)) != 0)
9365 #define TG3_MAC_LOOPBACK 0
9366 #define TG3_PHY_LOOPBACK 1
/* Run one loopback test in either MAC-internal or PHY loopback mode:
 * configure the loopback path, transmit a single self-addressed test
 * frame, then verify it arrives on the receive ring with an intact
 * payload.  Returns nonzero on failure (exact paths partly elided here).
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	struct sk_buff *skb, *rx_skb;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * the errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
		/* Loop frames back inside the MAC, bypassing the PHY. */
		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK;
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			/* 5906 PHY tweak via the EPHY shadow register set. */
			if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
				tg3_writephy(tp, MII_TG3_EPHY_TEST,
					     phytest | MII_TG3_EPHY_SHADOW_EN);
				if (!tg3_readphy(tp, 0x1b, &phy))
					tg3_writephy(tp, 0x1b, phy & ~0x20);
				tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
		tg3_phy_toggle_automdix(tp, 0);
		/* Put the PHY itself into loopback mode. */
		tg3_writephy(tp, MII_BMCR, val);
		mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
			if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
			else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
				mac_mode |= MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		tw32(MAC_MODE, mac_mode);
	/* Build the test frame: dest = our own MAC, then a counting payload. */
	skb = netdev_alloc_skb(tp->dev, tx_len);
	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);
	tw32(MAC_RX_MTU_SIZE, tx_len + 4);
	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);
	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	rx_start_idx = tp->hw_status->idx[0].rx_producer;
	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
	/* Kick the send mailbox and flush with a read-back. */
	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
	/* 250 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 25; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		/* Done once the frame was consumed and received back. */
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	if (tx_idx != tp->tx_prod)
	if (rx_idx != rx_start_idx + num_pkts)
	/* Validate the received descriptor: ring, errors, and length. */
	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
	/* Received length includes the 4-byte FCS; strip it for compare. */
	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
	rx_skb = tp->rx_std_buffers[desc_idx].skb;
	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
	/* Verify the counting payload survived the loop intact. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
	/* tg3_free_rings will unmap and free the rx_skb */
9536 #define TG3_MAC_LOOPBACK_FAILED 1
9537 #define TG3_PHY_LOOPBACK_FAILED 2
9538 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
9539 TG3_PHY_LOOPBACK_FAILED)
/* Top-level loopback test: reset the hardware, then run MAC loopback
 * and (for non-SERDES PHYs) PHY loopback, OR-ing the TG3_*_FAILED bits
 * into the return value.  On 5784/5761 the CPMU mutex is taken and
 * link-based power management disabled around the MAC loopback run.
 */
static int tg3_test_loopback(struct tg3 *tp)
	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;
	err = tg3_reset_hw(tp, 1);
		return TG3_LOOPBACK_FAILED;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		/* Request the CPMU hardware mutex before touching CPMU_CTRL. */
		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
		/* Wait for up to 40 microseconds to acquire lock. */
		for (i = 0; i < 4; i++) {
			status = tr32(TG3_CPMU_MUTEX_GNT);
			if (status == CPMU_MUTEX_GNT_DRIVER)
		if (status != CPMU_MUTEX_GNT_DRIVER)
			return TG3_LOOPBACK_FAILED;
		/* Turn off link-based power management. */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX)
			cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
				     CPMU_CTRL_LINK_AWARE_MODE));
			cpmuctrl & ~CPMU_CTRL_LINK_AWARE_MODE);
	if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
		err |= TG3_MAC_LOOPBACK_FAILED;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		/* Restore CPMU_CTRL to its pre-test value. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);
		/* Release the mutex */
		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
	/* PHY loopback only makes sense when a copper/MII PHY is present. */
	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
		if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
			err |= TG3_PHY_LOOPBACK_FAILED;
/* ethtool self-test entry point.  Always runs the NVRAM and link tests;
 * when ETH_TEST_FL_OFFLINE is requested, additionally halts the chip and
 * runs the register, memory, loopback and interrupt tests, then restarts
 * the hardware.  Per-test results are reported through data[]; failures
 * set ETH_TEST_FL_FAILED in etest->flags.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
	struct tg3 *tp = netdev_priv(dev);
	/* Wake the device so registers/NVRAM are accessible. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);
	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;
		if (netif_running(dev)) {
		tg3_full_lock(tp, irq_sync);
		/* Quiesce the hardware before the destructive tests. */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
			tg3_nvram_unlock(tp);
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;
		/* Interrupt test runs without the full lock held. */
		tg3_full_unlock(tp);
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
		tg3_full_lock(tp, 0);
		/* Bring the chip back up for normal operation. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			if (!tg3_restart_hw(tp, 1))
				tg3_netif_start(tp);
		tg3_full_unlock(tp);
	/* Return to low-power state if that is where we started. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);
/* MII ioctl handler: reports the PHY address and services MII register
 * read/write requests under tp->lock.  SERDES devices have no MII PHY,
 * so those requests fall through.  (Switch framing is elided here.)
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
		data->phy_id = PHY_ADDR;
		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */
		if (tp->link_config.phy_is_low_power)
		/* Serialize MII access against the rest of the driver. */
		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);
		data->val_out = mii_regval;
		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */
		/* Writing PHY registers requires admin privileges. */
		if (!capable(CAP_NET_ADMIN))
		if (tp->link_config.phy_is_low_power)
		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);
9727 #if TG3_VLAN_TAG_USED
/* VLAN acceleration hook: record the new vlan_group and refresh the
 * RX mode so the chip's VLAN tag handling matches, restarting the
 * netif path if the device is up.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
	struct tg3 *tp = netdev_priv(dev);
	if (netif_running(dev))
	tg3_full_lock(tp, 0);
	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);
	if (netif_running(dev))
		tg3_netif_start(tp);
	tg3_full_unlock(tp);
/* ethtool get_coalesce: copy out the driver's cached coalescing params. */
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
	struct tg3 *tp = netdev_priv(dev);
	memcpy(ec, &tp->coal, sizeof(*ec));
/* ethtool set_coalesce: validate the requested interrupt-coalescing
 * parameters against chip limits (5705+ chips do not support the
 * irq-tick and stats-tick knobs), copy the supported subset into
 * tp->coal, and program the hardware if the interface is up.
 */
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
	/* Only pre-5705 chips support these additional limits. */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	/* Reject anything outside the hardware's representable range. */
	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
	/* No rx interrupts will be generated if both are zero */
	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->rx_max_coalesced_frames == 0))
	/* No tx interrupts will be generated if both are zero */
	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
	/* Push the new settings to the chip if it is running. */
	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
/* ethtool operations exported by this driver.  NOTE(review): the
 * initializer continues past the end of this fragment.
 */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= tg3_set_tso,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
/* Determine the size of a (self-booting or legacy) EEPROM by probing
 * for the point where addressing wraps back to the magic signature.
 * Leaves the default EEPROM_CHIP_SIZE in place if no known magic is
 * found at offset 0.
 */
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
	u32 cursize, val, magic;
	tp->nvram_size = EEPROM_CHIP_SIZE;
	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
	/* Bail unless offset 0 carries one of the recognized signatures. */
	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
	tp->nvram_size = cursize;
/* Determine NVRAM size: selfboot images are sized by probing
 * (tg3_get_eeprom_size); standard images carry the size in KB in the
 * upper half of the word at offset 0xf0, falling back to 512KB.
 */
static void __devinit tg3_get_nvram_size(struct tg3 *tp)
	if (tg3_nvram_read_swab(tp, 0, &val) != 0)
	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
			/* Size is stored in KB in the upper 16 bits. */
			tp->nvram_size = (val >> 16) * 1024;
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
/* Decode NVRAM_CFG1 for pre-5752 chips: detect flash vs EEPROM, then
 * (on 5750/5780-class parts) map the vendor field to JEDEC id, page
 * size, and the buffered-NVRAM flag.  Defaults to a buffered Atmel
 * AT45DB0X1B part otherwise.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		/* No flash interface: force direct (non-bypass) access. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
			case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			case FLASH_VENDOR_ATMEL_EEPROM:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			case FLASH_VENDOR_ST:
				tp->nvram_jedecnum = JEDEC_ST;
				tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			case FLASH_VENDOR_SAIFUN:
				tp->nvram_jedecnum = JEDEC_SAIFUN;
				tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			case FLASH_VENDOR_SST_SMALL:
			case FLASH_VENDOR_SST_LARGE:
				tp->nvram_jedecnum = JEDEC_SST;
				tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
		/* Default assumption when the vendor cannot be decoded. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
/* Decode NVRAM_CFG1 on 5752: note TPM protection, map the vendor field
 * to JEDEC id / flags, then derive the flash page size from the
 * 5752PAGE_SIZE field (EEPROMs use the maximum chip size instead).
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
	nvcfg1 = tr32(NVRAM_CFG1);
	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
	if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
		/* Flash parts: page size is encoded in NVRAM_CFG1. */
		switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
			case FLASH_5752PAGE_SIZE_256:
				tp->nvram_pagesize = 256;
			case FLASH_5752PAGE_SIZE_512:
				tp->nvram_pagesize = 512;
			case FLASH_5752PAGE_SIZE_1K:
				tp->nvram_pagesize = 1024;
			case FLASH_5752PAGE_SIZE_2K:
				tp->nvram_pagesize = 2048;
			case FLASH_5752PAGE_SIZE_4K:
				tp->nvram_pagesize = 4096;
			case FLASH_5752PAGE_SIZE_264:
				tp->nvram_pagesize = 264;
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
/* Decode NVRAM_CFG1 on 5755-class chips: note TPM protection, then map
 * the vendor field to JEDEC id, page size, and total NVRAM size (the
 * size shrinks when the protected-NVRAM bit is set).
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
	u32 nvcfg1, protect = 0;
	nvcfg1 = tr32(NVRAM_CFG1);
	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 264;
		/* Protected images reserve space, so usable size is smaller. */
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
/* Decode NVRAM_CFG1 on 5787-class chips: map the vendor field to
 * JEDEC id, buffered/flash flags, and page size.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
	nvcfg1 = tr32(NVRAM_CFG1);
	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
		case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
		case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			/* EEPROMs are accessed directly, not via bypass. */
			nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
			tw32(NVRAM_CFG1, nvcfg1);
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_5755VENDOR_ATMEL_FLASH_1:
		case FLASH_5755VENDOR_ATMEL_FLASH_2:
		case FLASH_5755VENDOR_ATMEL_FLASH_3:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 264;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
/* Decode NVRAM_CFG1 on 5761: note TPM protection, map the vendor field
 * to JEDEC id / flags / page size, then size the part from the
 * NVRAM_ADDR_LOCKOUT register or from the specific vendor id.
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
	u32 nvcfg1, protect = 0;
	nvcfg1 = tr32(NVRAM_CFG1);
	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		/* These Atmel parts use linear addressing (no page translation). */
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
		tp->nvram_pagesize = 256;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 256;
	/* Prefer the size programmed into the address-lockout register. */
	tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_size = TG3_NVRAM_SIZE_2MB;
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE80:
		tp->nvram_size = TG3_NVRAM_SIZE_1MB;
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE40:
		tp->nvram_size = TG3_NVRAM_SIZE_512KB;
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE20:
		tp->nvram_size = TG3_NVRAM_SIZE_256KB;
/* 5906 always uses a buffered Atmel AT24C512-style EEPROM; no
 * NVRAM_CFG1 probing is needed.
 */
static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10190 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
10191 static void __devinit tg3_nvram_init(struct tg3 *tp)
10193 tw32_f(GRC_EEPROM_ADDR,
10194 (EEPROM_ADDR_FSM_RESET |
10195 (EEPROM_DEFAULT_CLOCK_PERIOD <<
10196 EEPROM_ADDR_CLKPERD_SHIFT)));
10200 /* Enable seeprom accesses. */
10201 tw32_f(GRC_LOCAL_CTRL,
10202 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10205 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10206 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10207 tp->tg3_flags |= TG3_FLAG_NVRAM;
10209 if (tg3_nvram_lock(tp)) {
10210 printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
10211 "tg3_nvram_init failed.\n", tp->dev->name);
10214 tg3_enable_nvram_access(tp);
10216 tp->nvram_size = 0;
10218 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10219 tg3_get_5752_nvram_info(tp);
10220 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10221 tg3_get_5755_nvram_info(tp);
10222 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10223 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
10224 tg3_get_5787_nvram_info(tp);
10225 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10226 tg3_get_5761_nvram_info(tp);
10227 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10228 tg3_get_5906_nvram_info(tp);
10230 tg3_get_nvram_info(tp);
10232 if (tp->nvram_size == 0)
10233 tg3_get_nvram_size(tp);
10235 tg3_disable_nvram_access(tp);
10236 tg3_nvram_unlock(tp);
10239 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10241 tg3_get_eeprom_size(tp);
/* Read one 32-bit word from a plain serial EEPROM via the GRC EEPROM
 * address/data registers, polling up to 1000 iterations for the
 * EEPROM_ADDR_COMPLETE bit before reading GRC_EEPROM_DATA.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
	/* Offset must fit in the EEPROM address field. */
	if (offset > EEPROM_ADDR_ADDR_MASK ||
	/* Preserve unrelated bits; clear address/devid/start fields. */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
	tw32(GRC_EEPROM_ADDR,
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
	/* Poll for completion of the read transaction. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);
		if (tmp & EEPROM_ADDR_COMPLETE)
	if (!(tmp & EEPROM_ADDR_COMPLETE))
	*val = tr32(GRC_EEPROM_DATA);
/* Maximum number of polling iterations for an NVRAM command. */
10279 #define NVRAM_CMD_TIMEOUT 10000
/* Issue @nvram_cmd to the NVRAM controller and poll up to
 * NVRAM_CMD_TIMEOUT iterations for NVRAM_CMD_DONE.  Presumably returns
 * 0 on completion and an error on timeout (returns are elided). */
10281 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10285 tw32(NVRAM_CMD, nvram_cmd);
10286 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10288 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
/* Loop exhausted without DONE: the command timed out. */
10293 if (i == NVRAM_CMD_TIMEOUT) {
/* Translate a linear NVRAM offset to the physical addressing used by
 * buffered Atmel AT45DB0x1B flash: the page number is shifted into the
 * bits above ATMEL_AT45DB0X1B_PAGE_POS and the byte offset within the
 * page occupies the low bits.  Applies only when all of the NVRAM /
 * buffered / flash flags are set, address translation is not disabled,
 * and the part is Atmel; otherwise the address passes through. */
10299 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10301 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10302 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10303 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10304 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10305 (tp->nvram_jedecnum == JEDEC_ATMEL))
/* page index in high bits + remainder within the page */
10307 addr = ((addr / tp->nvram_pagesize) <<
10308 ATMEL_AT45DB0X1B_PAGE_POS) +
10309 (addr % tp->nvram_pagesize);
/* Inverse of tg3_nvram_phys_addr(): convert an Atmel AT45DB0x1B
 * page/offset physical address back to a linear NVRAM offset, under
 * the same flag conditions; otherwise the address passes through. */
10314 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10316 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10317 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10318 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10319 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10320 (tp->nvram_jedecnum == JEDEC_ATMEL))
/* page_index * pagesize + offset-within-page */
10322 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10323 tp->nvram_pagesize) +
10324 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
/* Read one 32-bit word from NVRAM at @offset into *@val.  Falls back
 * to the GRC EEPROM path when no NVRAM block is present.  The raw
 * RDDATA register contents are byte-swapped before being stored.
 * Returns 0 on success, an error code otherwise. */
10329 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
10333 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
10334 return tg3_nvram_read_using_eeprom(tp, offset, val);
/* Convert to the flash part's physical addressing before ranging. */
10336 offset = tg3_nvram_phys_addr(tp, offset);
10338 if (offset > NVRAM_ADDR_MSK)
10341 ret = tg3_nvram_lock(tp);
10345 tg3_enable_nvram_access(tp);
/* One-word read: FIRST and LAST in a single command. */
10347 tw32(NVRAM_ADDR, offset);
10348 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
10349 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
10352 *val = swab32(tr32(NVRAM_RDDATA));
10354 tg3_disable_nvram_access(tp);
10356 tg3_nvram_unlock(tp);
/* Read one NVRAM word and return it as a little-endian __le32 in
 * *@val; propagates the return code of tg3_nvram_read(). */
10361 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
10364 int res = tg3_nvram_read(tp, offset, &v);
10366 *val = cpu_to_le32(v);
/* Read one NVRAM word and byte-swap it into *@val (undoing the swab
 * that tg3_nvram_read() applies); propagates the read's return code. */
10370 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10375 err = tg3_nvram_read(tp, offset, &tmp);
10376 *val = swab32(tmp);
/* Write @len bytes from @buf to the legacy EEPROM via the GRC
 * registers, one 32-bit word at a time, polling EEPROM_ADDR_COMPLETE
 * (up to 1000 iterations) after each word.  The address computation
 * for 'addr' is elided from this view — presumably offset + i. */
10380 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
10381 u32 offset, u32 len, u8 *buf)
10386 for (i = 0; i < len; i += 4) {
10392 memcpy(&data, buf + i, 4);
/* Data register takes CPU-order value of the little-endian word. */
10394 tw32(GRC_EEPROM_DATA, le32_to_cpu(data));
/* Clear any stale COMPLETE status before starting the write. */
10396 val = tr32(GRC_EEPROM_ADDR);
10397 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
10399 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
10401 tw32(GRC_EEPROM_ADDR, val |
10402 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10403 (addr & EEPROM_ADDR_ADDR_MASK) |
10404 EEPROM_ADDR_START |
10405 EEPROM_ADDR_WRITE);
/* Poll for per-word completion. */
10407 for (j = 0; j < 1000; j++) {
10408 val = tr32(GRC_EEPROM_ADDR);
10410 if (val & EEPROM_ADDR_COMPLETE)
10414 if (!(val & EEPROM_ADDR_COMPLETE)) {
10423 /* offset and length are dword aligned */
/* Write to unbuffered flash using read-modify-write of whole pages:
 * read the page into a kmalloc'd buffer, merge the caller's data,
 * issue write-enable, erase the page, write-enable again, then program
 * the page word-by-word with FIRST/LAST framing, and finally issue
 * write-disable (WRDI).  Loop framing around the per-page work is
 * partially elided from this view. */
10424 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
10428 u32 pagesize = tp->nvram_pagesize;
10429 u32 pagemask = pagesize - 1;
/* Scratch buffer for one full flash page. */
10433 tmp = kmalloc(pagesize, GFP_KERNEL);
10439 u32 phy_addr, page_off, size;
/* Start of the page containing the current offset. */
10441 phy_addr = offset & ~pagemask;
/* Read the existing page contents. */
10443 for (j = 0; j < pagesize; j += 4) {
10444 if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
10445 (__le32 *) (tmp + j))))
10451 page_off = offset & pagemask;
/* Merge caller data over the page image. */
10458 memcpy(tmp + page_off, buf, size);
10460 offset = offset + (pagesize - page_off);
10462 tg3_enable_nvram_access(tp);
10465 * Before we can erase the flash page, we need
10466 * to issue a special "write enable" command.
10468 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10470 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10473 /* Erase the target page */
10474 tw32(NVRAM_ADDR, phy_addr);
10476 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
10477 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
10479 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10482 /* Issue another write enable to start the write. */
10483 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10485 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
/* Program the merged page back, one dword at a time. */
10488 for (j = 0; j < pagesize; j += 4) {
10491 data = *((__be32 *) (tmp + j));
10492 /* swab32(le32_to_cpu(data)), actually */
10493 tw32(NVRAM_WRDATA, be32_to_cpu(data));
10495 tw32(NVRAM_ADDR, phy_addr + j);
10497 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
/* FIRST on the first word, LAST on the final word of the page. */
10501 nvram_cmd |= NVRAM_CMD_FIRST;
10502 else if (j == (pagesize - 4))
10503 nvram_cmd |= NVRAM_CMD_LAST;
10505 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
/* Drop write-enable when done; result intentionally ignored. */
10512 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10513 tg3_nvram_exec_cmd(tp, nvram_cmd);
10520 /* offset and length are dword aligned */
/* Write @len bytes to buffered flash, one dword per NVRAM command.
 * FIRST/LAST framing is derived from the position within the flash
 * page; ST-brand parts on older ASICs additionally need an explicit
 * WREN command before each FIRST write. */
10521 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
10526 for (i = 0; i < len; i += 4, offset += 4) {
10527 u32 page_off, phy_addr, nvram_cmd;
10530 memcpy(&data, buf + i, 4);
10531 tw32(NVRAM_WRDATA, be32_to_cpu(data));
10533 page_off = offset % tp->nvram_pagesize;
/* Translate to the flash part's physical addressing. */
10535 phy_addr = tg3_nvram_phys_addr(tp, offset);
10537 tw32(NVRAM_ADDR, phy_addr);
10539 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
/* FIRST at a page boundary or at the start of the transfer. */
10541 if ((page_off == 0) || (i == 0))
10542 nvram_cmd |= NVRAM_CMD_FIRST;
/* LAST at the end of a page or the end of the transfer. */
10543 if (page_off == (tp->nvram_pagesize - 4))
10544 nvram_cmd |= NVRAM_CMD_LAST;
10546 if (i == (len - 4))
10547 nvram_cmd |= NVRAM_CMD_LAST;
/* ST flash on pre-5752-class ASICs: write-enable before each burst. */
10549 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
10550 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
10551 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
10552 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
10553 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
10554 (tp->nvram_jedecnum == JEDEC_ST) &&
10555 (nvram_cmd & NVRAM_CMD_FIRST)) {
10557 if ((ret = tg3_nvram_exec_cmd(tp,
10558 NVRAM_CMD_WREN | NVRAM_CMD_GO |
10563 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10564 /* We always do complete word writes to eeprom. */
10565 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
10568 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10574 /* offset and length are dword aligned */
/* Top-level NVRAM write dispatcher.  Temporarily drops the
 * write-protect GPIO when EEPROM_WRITE_PROT is set, routes to the
 * EEPROM path when no NVRAM is present, otherwise takes the NVRAM
 * lock, enables write access via GRC_MODE, and picks the buffered or
 * unbuffered block writer.  Restores write-protect on the way out. */
10575 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
/* De-assert write protect (GPIO OUTPUT1) for the duration. */
10579 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10580 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
10581 ~GRC_LCLCTRL_GPIO_OUTPUT1);
10585 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
10586 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
10591 ret = tg3_nvram_lock(tp);
10595 tg3_enable_nvram_access(tp);
/* Magic write-once setup for 5750+ parts without protected NVRAM;
 * the meaning of 0x406 is not documented in this view. */
10596 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
10597 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
10598 tw32(NVRAM_WRITE1, 0x406);
/* Enable NVRAM writes at the GRC level for the duration. */
10600 grc_mode = tr32(GRC_MODE);
10601 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
10603 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
10604 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10606 ret = tg3_nvram_write_block_buffered(tp, offset, len,
10610 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
10614 grc_mode = tr32(GRC_MODE);
10615 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
10617 tg3_disable_nvram_access(tp);
10618 tg3_nvram_unlock(tp);
/* Re-assert write protect. */
10621 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10622 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
/* Table entry mapping a PCI subsystem vendor/device pair to its PHY
 * id.  The third initializer in each row below is presumably a phy_id
 * field whose declaration is elided from this view. */
10629 struct subsys_tbl_ent {
10630 u16 subsys_vendor, subsys_devid;
/* Hardcoded subsystem-id -> PHY-id table used as a fallback when the
 * EEPROM carries no usable PHY information (see lookup_by_subsys). */
10634 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
10635 /* Broadcom boards. */
10636 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
10637 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
10638 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
10639 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
10640 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
10641 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
10642 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
10643 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
10644 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
10645 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
10646 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
/* 3com boards. */
10649 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
10650 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
10651 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
10652 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
10653 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
/* DELL boards. */
10656 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
10657 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
10658 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
10659 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
10661 /* Compaq boards. */
10662 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
10663 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
10664 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
10665 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
10666 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
/* IBM boards. */
10669 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
/* Linear search of subsys_id_to_phy_id[] for this device's PCI
 * subsystem vendor/device pair.  Returns the matching entry;
 * presumably returns NULL when no entry matches (that return is
 * elided from this view). */
10672 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10676 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10677 if ((subsys_id_to_phy_id[i].subsys_vendor ==
10678 tp->pdev->subsystem_vendor) &&
10679 (subsys_id_to_phy_id[i].subsys_devid ==
10680 tp->pdev->subsystem_device))
10681 return &subsys_id_to_phy_id[i];
/* Parse the hardware configuration block that bootcode left in NIC
 * SRAM (signature NIC_SRAM_DATA_SIG_MAGIC) and translate it into
 * tp->phy_id, LED mode, WOL/ASF/APE and write-protect flags.  Forces
 * the device into D0 and enables the memory arbiter first, since SRAM
 * is unreadable otherwise.  5906 parts use the VCPU shadow config
 * instead of parts of the SRAM block. */
10686 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
10691 /* On some early chips the SRAM cannot be accessed in D3hot state,
10692 * so need make sure we're in D0.
/* Clear the PM state field -> D0. */
10694 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
10695 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10696 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
10699 /* Make sure register accesses (indirect or otherwise)
10700 * will function correctly.
10702 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10703 tp->misc_host_ctrl);
10705 /* The memory arbiter has to be enabled in order for SRAM accesses
10706 * to succeed. Normally on powerup the tg3 chip firmware will make
10707 * sure it is enabled, but other entities such as system netboot
10708 * code might disable it.
10710 val = tr32(MEMARB_MODE);
10711 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
/* Defaults overridden below if valid SRAM config is found. */
10713 tp->phy_id = PHY_ID_INVALID;
10714 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10716 /* Assume an onboard device and WOL capable by default. */
10717 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
/* 5906: LOM/NIC detection and WOL/ASPM come from PCIE cfg + VCPU
 * shadow registers rather than the SRAM block. */
10719 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10720 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
10721 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10722 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10724 val = tr32(VCPU_CFGSHDW);
10725 if (val & VCPU_CFGSHDW_ASPM_DBNC)
10726 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10727 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
10728 (val & VCPU_CFGSHDW_WOL_MAGPKT))
10729 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
/* Only trust the SRAM block when the magic signature is present. */
10733 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
10734 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
10735 u32 nic_cfg, led_cfg;
10736 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
10737 int eeprom_phy_serdes = 0;
10739 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
10740 tp->nic_sram_data_cfg = nic_cfg;
/* CFG_2 only exists on newer bootcode versions (0 < ver < 0x100)
 * and not on 5700/5701/5703. */
10742 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
10743 ver >>= NIC_SRAM_DATA_VER_SHIFT;
10744 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
10745 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
10746 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
10747 (ver > 0) && (ver < 0x100))
10748 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
10750 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
10751 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
10752 eeprom_phy_serdes = 1;
/* Repack the SRAM-encoded PHY id into the driver's layout. */
10754 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
10755 if (nic_phy_id != 0) {
10756 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
10757 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
10759 eeprom_phy_id = (id1 >> 16) << 10;
10760 eeprom_phy_id |= (id2 & 0xfc00) << 16;
10761 eeprom_phy_id |= (id2 & 0x03ff) << 0;
10765 tp->phy_id = eeprom_phy_id;
10766 if (eeprom_phy_serdes) {
10767 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
10768 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
10770 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
/* LED mode lives in CFG_2 on 5750+ (with Shasta extensions),
 * otherwise in the base config word. */
10773 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10774 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
10775 SHASTA_EXT_LED_MODE_MASK);
10777 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
10781 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
10782 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10785 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
10786 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10789 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
10790 tp->led_ctrl = LED_CTRL_MODE_MAC;
10792 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
10793 * read on some older 5700/5701 bootcode.
10795 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10797 GET_ASIC_REV(tp->pci_chip_rev_id) ==
10799 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10803 case SHASTA_EXT_LED_SHARED:
10804 tp->led_ctrl = LED_CTRL_MODE_SHARED;
/* 5750 A0/A1 cannot drive the extra PHY LED bits. */
10805 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
10806 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
10807 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10808 LED_CTRL_MODE_PHY_2);
10811 case SHASTA_EXT_LED_MAC:
10812 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
10815 case SHASTA_EXT_LED_COMBO:
10816 tp->led_ctrl = LED_CTRL_MODE_COMBO;
10817 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
10818 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10819 LED_CTRL_MODE_PHY_2);
/* Dell 5700/5701 boards use PHY_2 LED mode regardless. */
10824 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10825 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
10826 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
10827 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10829 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
10830 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
/* EEPROM write-protect: honored from SRAM config, except on two
 * Arima boards that misreport it. */
10832 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
10833 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
10834 if ((tp->pdev->subsystem_vendor ==
10835 PCI_VENDOR_ID_ARIMA) &&
10836 (tp->pdev->subsystem_device == 0x205a ||
10837 tp->pdev->subsystem_device == 0x2063))
10838 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10840 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10841 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10844 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
10845 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
10846 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10847 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
10849 if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
10850 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
/* SerDes boards lose WOL capability unless FIBER_WOL is set. */
10851 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
10852 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
10853 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
10855 if (tp->tg3_flags & TG3_FLAG_WOL_CAP &&
10856 nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)
10857 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10859 if (cfg2 & (1 << 17))
10860 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
10862 /* serdes signal pre-emphasis in register 0x590 set by */
10863 /* bootcode if bit 18 is set */
10864 if (cfg2 & (1 << 18))
10865 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
10867 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10870 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
10871 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
10872 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
/* Start an OTP (one-time-programmable) controller command and poll
 * OTP_STATUS for completion.  Per the comment below, the loop bounds
 * correspond to roughly 1 ms of polling; returns 0 on completion,
 * -EBUSY on timeout. */
10877 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
/* Two-step trigger: write cmd with START, then cmd alone. */
10882 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
10883 tw32(OTP_CTRL, cmd);
10885 /* Wait for up to 1 ms for command to execute. */
10886 for (i = 0; i < 100; i++) {
10887 val = tr32(OTP_STATUS);
10888 if (val & OTP_STATUS_CMD_DONE)
10893 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
10896 /* Read the gphy configuration from the OTP region of the chip. The gphy
10897 * configuration is a 32-bit value that straddles the alignment boundary.
10898 * We do two 32-bit reads and then shift and merge the results.
/* Returns the merged 32-bit gphy config; presumably returns 0 on any
 * OTP command failure (the early-return bodies are elided here). */
10900 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
10902 u32 bhalf_otp, thalf_otp;
/* Route OTP access through the GRC register window. */
10904 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
10906 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
/* First (top) half. */
10909 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
10911 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
10914 thalf_otp = tr32(OTP_READ_DATA);
/* Second (bottom) half. */
10916 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
10918 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
10921 bhalf_otp = tr32(OTP_READ_DATA);
/* Low 16 bits of the top half become the high word of the result. */
10923 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
/* Identify the attached PHY.  Prefers the hardware PHYSID registers;
 * falls back to the id recorded in EEPROM/SRAM, then to the hardcoded
 * subsystem-id table.  When ASF/APE firmware owns the PHY, skips the
 * hardware read entirely.  For copper PHYs (no serdes, no firmware
 * ownership) also ensures full autoneg advertisement and resets/
 * configures the PHY as needed.  Returns 0 on success. */
10926 static int __devinit tg3_phy_probe(struct tg3 *tp)
10928 u32 hw_phy_id_1, hw_phy_id_2;
10929 u32 hw_phy_id, hw_phy_id_masked;
10932 /* Reading the PHY ID register can conflict with ASF
10933 * firwmare access to the PHY hardware.
10936 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
10937 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
10938 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
10940 /* Now read the physical PHY_ID from the chip and verify
10941 * that it is sane. If it doesn't look good, we fall back
10942 * to either the hard-coded table based PHY_ID and failing
10943 * that the value found in the eeprom area.
10945 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
10946 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
/* Repack MII PHYSID1/2 into the driver's PHY-id layout (same
 * packing as tg3_get_eeprom_hw_cfg uses for the SRAM id). */
10948 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
10949 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
10950 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
10952 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
/* Hardware id read cleanly and is a known PHY: trust it. */
10955 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
10956 tp->phy_id = hw_phy_id;
10957 if (hw_phy_id_masked == PHY_ID_BCM8002)
10958 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10960 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
10962 if (tp->phy_id != PHY_ID_INVALID) {
10963 /* Do nothing, phy ID already set up in
10964 * tg3_get_eeprom_hw_cfg().
10967 struct subsys_tbl_ent *p;
10969 /* No eeprom signature? Try the hardcoded
10970 * subsys device table.
10972 p = lookup_by_subsys(tp);
10976 tp->phy_id = p->phy_id;
10978 tp->phy_id == PHY_ID_BCM8002)
10979 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
/* Copper setup: only when no serdes and no firmware ownership. */
10983 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
10984 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
10985 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
10986 u32 bmsr, adv_reg, tg3_ctrl, mask;
/* BMSR is read twice (latched-low semantics); skip reconfig if
 * link is already up. */
10988 tg3_readphy(tp, MII_BMSR, &bmsr);
10989 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
10990 (bmsr & BMSR_LSTATUS))
10991 goto skip_phy_reset;
10993 err = tg3_phy_reset(tp);
/* Advertise all 10/100 modes plus pause. */
10997 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
10998 ADVERTISE_100HALF | ADVERTISE_100FULL |
10999 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
11001 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
11002 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
11003 MII_TG3_CTRL_ADV_1000_FULL);
/* Early 5701 revs must negotiate as master. */
11004 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11005 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
11006 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
11007 MII_TG3_CTRL_ENABLE_AS_MASTER);
11010 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11011 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11012 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
/* Rewrite advertisement and restart autoneg only if the PHY is
 * not already advertising everything. */
11013 if (!tg3_copper_is_advertising_all(tp, mask)) {
11014 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11016 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11017 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11019 tg3_writephy(tp, MII_BMCR,
11020 BMCR_ANENABLE | BMCR_ANRESTART);
11022 tg3_phy_set_wirespeed(tp);
11024 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11025 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11026 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
/* BCM5401 needs its DSP patched after reset. */
11030 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
11031 err = tg3_init_5401phy_dsp(tp);
11036 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
11037 err = tg3_init_5401phy_dsp(tp);
/* SerDes link: restrict advertised modes to gigabit + autoneg. */
11040 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
11041 tp->link_config.advertising =
11042 (ADVERTISED_1000baseT_Half |
11043 ADVERTISED_1000baseT_Full |
11044 ADVERTISED_Autoneg |
11046 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
11047 tp->link_config.advertising &=
11048 ~(ADVERTISED_1000baseT_Half |
11049 ADVERTISED_1000baseT_Full);
/* Extract the board part number from the VPD ("Vital Product Data")
 * area into tp->board_part_number.  The 256-byte VPD image is fetched
 * either from NVRAM (when the EEPROM magic matches) or through the PCI
 * VPD capability registers, then scanned for the "PN" keyword inside
 * the read-only (0x90) VPD block.  Falls back to "BCM95906" on 5906
 * parts or the string "none" when no part number is found. */
11054 static void __devinit tg3_read_partno(struct tg3 *tp)
11056 unsigned char vpd_data[256];
11060 if (tg3_nvram_read_swab(tp, 0x0, &magic))
11061 goto out_not_found;
/* Path 1: VPD image lives at NVRAM offset 0x100. */
11063 if (magic == TG3_EEPROM_MAGIC) {
11064 for (i = 0; i < 256; i += 4) {
11067 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
11068 goto out_not_found;
/* Unpack the word little-endian into the byte buffer. */
11070 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
11071 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
11072 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
11073 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
/* Path 2: read VPD through the PCI VPD capability, polling the
 * address register's flag bit (0x8000) for data-ready. */
11078 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
11079 for (i = 0; i < 256; i += 4) {
11084 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
11086 while (j++ < 100) {
11087 pci_read_config_word(tp->pdev, vpd_cap +
11088 PCI_VPD_ADDR, &tmp16);
11089 if (tmp16 & 0x8000)
11093 if (!(tmp16 & 0x8000))
11094 goto out_not_found;
11096 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11098 v = cpu_to_le32(tmp);
11099 memcpy(&vpd_data[i], &v, 4);
11103 /* Now parse and find the part number. */
11104 for (i = 0; i < 254; ) {
11105 unsigned char val = vpd_data[i];
11106 unsigned int block_end;
/* 0x82 = identifier string, 0x91 = read-write block: skip over
 * using the 16-bit little-endian length that follows the tag. */
11108 if (val == 0x82 || val == 0x91) {
11111 (vpd_data[i + 2] << 8)));
11116 goto out_not_found;
11118 block_end = (i + 3 +
11120 (vpd_data[i + 2] << 8)));
11123 if (block_end > 256)
11124 goto out_not_found;
/* Walk the keyword entries inside the read-only block. */
11126 while (i < (block_end - 2)) {
11127 if (vpd_data[i + 0] == 'P' &&
11128 vpd_data[i + 1] == 'N') {
11129 int partno_len = vpd_data[i + 2];
/* Bound the copy by the destination (24) and the buffer. */
11132 if (partno_len > 24 || (partno_len + i) > 256)
11133 goto out_not_found;
11135 memcpy(tp->board_part_number,
11136 &vpd_data[i], partno_len);
/* Advance past this keyword entry: tag(2) + len(1) + data. */
11141 i += 3 + vpd_data[i + 2];
11144 /* Part number not found. */
11145 goto out_not_found;
/* Fallbacks when no usable VPD is present. */
11149 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11150 strcpy(tp->board_part_number, "BCM95906");
11152 strcpy(tp->board_part_number, "none");
/* Sanity-check a firmware image header in NVRAM at @offset: the first
 * word's top bits must be 0x0c000000, and the following word must be
 * readable (its check is elided here).  Presumably returns nonzero for
 * a valid image and 0 otherwise — confirm against the full source. */
11155 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11159 if (tg3_nvram_read_swab(tp, offset, &val) ||
11160 (val & 0xfc000000) != 0x0c000000 ||
11161 tg3_nvram_read_swab(tp, offset + 4, &val) ||
/* Build tp->fw_ver from NVRAM: first the bootcode version string
 * (located via the pointers at NVRAM offsets 0x4 and 0xc), then, when
 * ASF is enabled without APE, append ", " plus four words of the ASF
 * initialization firmware's version found through the NVM directory.
 * The result is always NUL-terminated at TG3_VER_SIZE - 1. */
11168 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
11170 u32 val, offset, start;
11174 if (tg3_nvram_read_swab(tp, 0, &val))
11177 if (val != TG3_EEPROM_MAGIC)
/* 0xc holds the fw image pointer, 0x4 the load ("start") address. */
11180 if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
11181 tg3_nvram_read_swab(tp, 0x4, &start))
11184 offset = tg3_nvram_logical_addr(tp, offset);
11186 if (!tg3_fw_img_is_valid(tp, offset) ||
11187 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
/* Copy 16 bytes of version string, 4 at a time. */
11190 offset = offset + ver_offset - start;
11191 for (i = 0; i < 16; i += 4) {
11193 if (tg3_nvram_read_le(tp, offset + i, &v))
11196 memcpy(tp->fw_ver + i, &v, 4);
/* ASF version is only appended when ASF owns the NIC and there is
 * no APE (which would manage firmware instead). */
11199 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11200 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
/* Scan the NVM directory for the ASF-init entry. */
11203 for (offset = TG3_NVM_DIR_START;
11204 offset < TG3_NVM_DIR_END;
11205 offset += TG3_NVM_DIRENT_SIZE) {
11206 if (tg3_nvram_read_swab(tp, offset, &val))
11209 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
11213 if (offset == TG3_NVM_DIR_END)
/* Pre-5705 parts use a fixed load address. */
11216 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11217 start = 0x08000000;
11218 else if (tg3_nvram_read_swab(tp, offset - 4, &start))
11221 if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
11222 !tg3_fw_img_is_valid(tp, offset) ||
11223 tg3_nvram_read_swab(tp, offset + 8, &val))
11226 offset += val - start;
/* Append ", " then up to four version words, clamped to the
 * tp->fw_ver buffer (TG3_VER_SIZE). */
11228 bcnt = strlen(tp->fw_ver);
11230 tp->fw_ver[bcnt++] = ',';
11231 tp->fw_ver[bcnt++] = ' ';
11233 for (i = 0; i < 4; i++) {
11235 if (tg3_nvram_read_le(tp, offset, &v))
11238 offset += sizeof(v);
11240 if (bcnt > TG3_VER_SIZE - sizeof(v)) {
11241 memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
11245 memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
11249 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
11252 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11254 static int __devinit tg3_get_invariants(struct tg3 *tp)
11256 static struct pci_device_id write_reorder_chipsets[] = {
11257 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11258 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
11259 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11260 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
11261 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11262 PCI_DEVICE_ID_VIA_8385_0) },
11266 u32 cacheline_sz_reg;
11267 u32 pci_state_reg, grc_misc_cfg;
11272 /* Force memory write invalidate off. If we leave it on,
11273 * then on 5700_BX chips we have to enable a workaround.
11274 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11275 * to match the cacheline size. The Broadcom driver have this
11276 * workaround but turns MWI off all the times so never uses
11277 * it. This seems to suggest that the workaround is insufficient.
11279 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11280 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11281 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11283 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11284 * has the register indirect write enable bit set before
11285 * we try to access any of the MMIO registers. It is also
11286 * critical that the PCI-X hw workaround situation is decided
11287 * before that as well.
11289 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11292 tp->pci_chip_rev_id = (misc_ctrl_reg >>
11293 MISC_HOST_CTRL_CHIPREV_SHIFT);
11294 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11295 u32 prod_id_asic_rev;
11297 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11298 &prod_id_asic_rev);
11299 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11302 /* Wrong chip ID in 5752 A0. This code can be removed later
11303 * as A0 is not in production.
11305 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11306 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11308 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11309 * we need to disable memory and use config. cycles
11310 * only to access all registers. The 5702/03 chips
11311 * can mistakenly decode the special cycles from the
11312 * ICH chipsets as memory write cycles, causing corruption
11313 * of register and memory space. Only certain ICH bridges
11314 * will drive special cycles with non-zero data during the
11315 * address phase which can fall within the 5703's address
11316 * range. This is not an ICH bug as the PCI spec allows
11317 * non-zero address during special cycles. However, only
11318 * these ICH bridges are known to drive non-zero addresses
11319 * during special cycles.
11321 * Since special cycles do not cross PCI bridges, we only
11322 * enable this workaround if the 5703 is on the secondary
11323 * bus of these ICH bridges.
11325 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11326 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11327 static struct tg3_dev_id {
11331 } ich_chipsets[] = {
11332 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11334 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11336 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11338 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11342 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11343 struct pci_dev *bridge = NULL;
11345 while (pci_id->vendor != 0) {
11346 bridge = pci_get_device(pci_id->vendor, pci_id->device,
11352 if (pci_id->rev != PCI_ANY_ID) {
11353 if (bridge->revision > pci_id->rev)
11356 if (bridge->subordinate &&
11357 (bridge->subordinate->number ==
11358 tp->pdev->bus->number)) {
11360 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11361 pci_dev_put(bridge);
11367 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
11368 static struct tg3_dev_id {
11371 } bridge_chipsets[] = {
11372 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
11373 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
11376 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
11377 struct pci_dev *bridge = NULL;
11379 while (pci_id->vendor != 0) {
11380 bridge = pci_get_device(pci_id->vendor,
11387 if (bridge->subordinate &&
11388 (bridge->subordinate->number <=
11389 tp->pdev->bus->number) &&
11390 (bridge->subordinate->subordinate >=
11391 tp->pdev->bus->number)) {
11392 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
11393 pci_dev_put(bridge);
11399 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
11400 * DMA addresses > 40-bit. This bridge may have other additional
11401 * 57xx devices behind it in some 4-port NIC designs for example.
11402 * Any tg3 device found behind the bridge will also need the 40-bit
11405 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11406 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11407 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
11408 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11409 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
11412 struct pci_dev *bridge = NULL;
11415 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
11416 PCI_DEVICE_ID_SERVERWORKS_EPB,
11418 if (bridge && bridge->subordinate &&
11419 (bridge->subordinate->number <=
11420 tp->pdev->bus->number) &&
11421 (bridge->subordinate->subordinate >=
11422 tp->pdev->bus->number)) {
11423 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11424 pci_dev_put(bridge);
11430 /* Initialize misc host control in PCI block. */
11431 tp->misc_host_ctrl |= (misc_ctrl_reg &
11432 MISC_HOST_CTRL_CHIPREV);
11433 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11434 tp->misc_host_ctrl);
11436 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11437 &cacheline_sz_reg);
11439 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
11440 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
11441 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
11442 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
11444 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11445 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11446 tp->pdev_peer = tg3_find_peer(tp);
11448 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11449 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11450 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11451 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11452 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11453 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11454 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
11455 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11456 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
11458 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
11459 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11460 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
11462 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
11463 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
11464 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
11465 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
11466 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
11467 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
11468 tp->pdev_peer == tp->pdev))
11469 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
11471 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11472 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11473 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11474 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11475 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11476 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
11477 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
11479 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
11480 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11482 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
11483 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
11487 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
11488 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
11489 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11490 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
11491 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
11492 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
11493 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
11494 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11495 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
11497 pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
11498 if (pcie_cap != 0) {
11499 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
11501 pcie_set_readrq(tp->pdev, 4096);
11503 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11506 pci_read_config_word(tp->pdev,
11507 pcie_cap + PCI_EXP_LNKCTL,
11509 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
11510 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
11514 /* If we have an AMD 762 or VIA K8T800 chipset, write
11515 * reordering to the mailbox registers done by the host
11516 * controller can cause major troubles. We read back from
11517 * every mailbox register write to force the writes to be
11518 * posted to the chip in order.
11520 if (pci_dev_present(write_reorder_chipsets) &&
11521 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11522 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
11524 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11525 tp->pci_lat_timer < 64) {
11526 tp->pci_lat_timer = 64;
11528 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
11529 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
11530 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
11531 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
11533 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11537 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11538 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11539 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
11540 if (!tp->pcix_cap) {
11541 printk(KERN_ERR PFX "Cannot find PCI-X "
11542 "capability, aborting.\n");
11547 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11550 if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
11551 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
11553 /* If this is a 5700 BX chipset, and we are in PCI-X
11554 * mode, enable register write workaround.
11556 * The workaround is to use indirect register accesses
11557 * for all chip writes not to mailbox registers.
11559 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
11562 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11564 /* The chip can have it's power management PCI config
11565 * space registers clobbered due to this bug.
11566 * So explicitly force the chip into D0 here.
11568 pci_read_config_dword(tp->pdev,
11569 tp->pm_cap + PCI_PM_CTRL,
11571 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
11572 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
11573 pci_write_config_dword(tp->pdev,
11574 tp->pm_cap + PCI_PM_CTRL,
11577 /* Also, force SERR#/PERR# in PCI command. */
11578 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11579 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
11580 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11584 /* 5700 BX chips need to have their TX producer index mailboxes
11585 * written twice to workaround a bug.
11587 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
11588 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
11590 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
11591 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
11592 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
11593 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
11595 /* Chip-specific fixup from Broadcom driver */
11596 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
11597 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
11598 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
11599 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
11602 /* Default fast path register access methods */
11603 tp->read32 = tg3_read32;
11604 tp->write32 = tg3_write32;
11605 tp->read32_mbox = tg3_read32;
11606 tp->write32_mbox = tg3_write32;
11607 tp->write32_tx_mbox = tg3_write32;
11608 tp->write32_rx_mbox = tg3_write32;
11610 /* Various workaround register access methods */
11611 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
11612 tp->write32 = tg3_write_indirect_reg32;
11613 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11614 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
11615 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
11617 * Back to back register writes can cause problems on these
11618 * chips, the workaround is to read back all reg writes
11619 * except those to mailbox regs.
11621 * See tg3_write_indirect_reg32().
11623 tp->write32 = tg3_write_flush_reg32;
11627 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
11628 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
11629 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11630 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
11631 tp->write32_rx_mbox = tg3_write_flush_reg32;
11634 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
11635 tp->read32 = tg3_read_indirect_reg32;
11636 tp->write32 = tg3_write_indirect_reg32;
11637 tp->read32_mbox = tg3_read_indirect_mbox;
11638 tp->write32_mbox = tg3_write_indirect_mbox;
11639 tp->write32_tx_mbox = tg3_write_indirect_mbox;
11640 tp->write32_rx_mbox = tg3_write_indirect_mbox;
11645 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11646 pci_cmd &= ~PCI_COMMAND_MEMORY;
11647 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11649 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11650 tp->read32_mbox = tg3_read32_mbox_5906;
11651 tp->write32_mbox = tg3_write32_mbox_5906;
11652 tp->write32_tx_mbox = tg3_write32_mbox_5906;
11653 tp->write32_rx_mbox = tg3_write32_mbox_5906;
11656 if (tp->write32 == tg3_write_indirect_reg32 ||
11657 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11658 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11659 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
11660 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
11662 /* Get eeprom hw config before calling tg3_set_power_state().
11663 * In particular, the TG3_FLG2_IS_NIC flag must be
11664 * determined before calling tg3_set_power_state() so that
11665 * we know whether or not to switch out of Vaux power.
11666 * When the flag is set, it means that GPIO1 is used for eeprom
11667 * write protect and also implies that it is a LOM where GPIOs
11668 * are not used to switch power.
11670 tg3_get_eeprom_hw_cfg(tp);
11672 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
11673 /* Allow reads and writes to the
11674 * APE register and memory space.
11676 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
11677 PCISTATE_ALLOW_APE_SHMEM_WR;
11678 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
11682 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11683 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11684 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
11686 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
11687 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1 ||
11688 tp->pci_chip_rev_id == CHIPREV_ID_5761_A0 ||
11689 tp->pci_chip_rev_id == CHIPREV_ID_5761_A1)
11690 tp->tg3_flags3 |= TG3_FLG3_5761_5784_AX_FIXES;
11693 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
11694 * GPIO1 driven high will bring 5700's external PHY out of reset.
11695 * It is also used as eeprom write protect on LOMs.
11697 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
11698 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11699 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
11700 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
11701 GRC_LCLCTRL_GPIO_OUTPUT1);
11702 /* Unused GPIO3 must be driven as output on 5752 because there
11703 * are no pull-up resistors on unused GPIO pins.
11705 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11706 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
11708 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11709 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
11711 /* Force the chip into D0. */
11712 err = tg3_set_power_state(tp, PCI_D0);
11714 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
11715 pci_name(tp->pdev));
11719 /* 5700 B0 chips do not support checksumming correctly due
11720 * to hardware bugs.
11722 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
11723 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
11725 /* Derive initial jumbo mode from MTU assigned in
11726 * ether_setup() via the alloc_etherdev() call
11728 if (tp->dev->mtu > ETH_DATA_LEN &&
11729 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11730 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
11732 /* Determine WakeOnLan speed to use. */
11733 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11734 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11735 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
11736 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
11737 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
11739 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
11742 /* A few boards don't want Ethernet@WireSpeed phy feature */
11743 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11744 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
11745 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
11746 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
11747 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
11748 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
11749 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
11751 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
11752 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
11753 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
11754 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
11755 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
11757 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11758 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11759 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11760 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11761 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11762 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
11763 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
11764 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
11765 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
11766 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
11767 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11768 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
11771 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
11772 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
11773 tp->phy_otp = tg3_read_otp_phycfg(tp);
11774 if (tp->phy_otp == 0)
11775 tp->phy_otp = TG3_OTP_DEFAULT;
11778 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11779 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11780 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
11782 tp->mi_mode = MAC_MI_MODE_BASE;
11784 tp->coalesce_mode = 0;
11785 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
11786 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
11787 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
11789 /* Initialize MAC MI mode, polling disabled. */
11790 tw32_f(MAC_MI_MODE, tp->mi_mode);
11793 /* Initialize data/descriptor byte/word swapping. */
11794 val = tr32(GRC_MODE);
11795 val &= GRC_MODE_HOST_STACKUP;
11796 tw32(GRC_MODE, val | tp->grc_mode);
11798 tg3_switch_clocks(tp);
11800 /* Clear this out for sanity. */
11801 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
11803 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11805 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
11806 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
11807 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
11809 if (chiprevid == CHIPREV_ID_5701_A0 ||
11810 chiprevid == CHIPREV_ID_5701_B0 ||
11811 chiprevid == CHIPREV_ID_5701_B2 ||
11812 chiprevid == CHIPREV_ID_5701_B5) {
11813 void __iomem *sram_base;
11815 /* Write some dummy words into the SRAM status block
11816 * area, see if it reads back correctly. If the return
11817 * value is bad, force enable the PCIX workaround.
11819 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
11821 writel(0x00000000, sram_base);
11822 writel(0x00000000, sram_base + 4);
11823 writel(0xffffffff, sram_base + 4);
11824 if (readl(sram_base) != 0x00000000)
11825 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11830 tg3_nvram_init(tp);
11832 grc_misc_cfg = tr32(GRC_MISC_CFG);
11833 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
11835 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11836 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
11837 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
11838 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
11840 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
11841 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
11842 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
11843 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
11844 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
11845 HOSTCC_MODE_CLRTICK_TXBD);
11847 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
11848 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11849 tp->misc_host_ctrl);
11852 /* these are limited to 10/100 only */
11853 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11854 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
11855 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11856 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11857 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
11858 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
11859 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
11860 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11861 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
11862 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
11863 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
11864 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11865 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
11867 err = tg3_phy_probe(tp);
11869 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
11870 pci_name(tp->pdev), err);
11871 /* ... but do not return immediately ... */
11874 tg3_read_partno(tp);
11875 tg3_read_fw_ver(tp);
11877 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
11878 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11880 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11881 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
11883 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11886 /* 5700 {AX,BX} chips have a broken status block link
11887 * change bit implementation, so we must use the
11888 * status register in those cases.
11890 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11891 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
11893 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
11895 /* The led_ctrl is set during tg3_phy_probe, here we might
11896 * have to force the link status polling mechanism based
11897 * upon subsystem IDs.
11899 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
11900 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11901 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
11902 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
11903 TG3_FLAG_USE_LINKCHG_REG);
11906 /* For all SERDES we poll the MAC status register. */
11907 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
11908 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
11910 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
11912 /* All chips before 5787 can get confused if TX buffers
11913 * straddle the 4GB address boundary in some cases.
11915 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11916 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11917 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11918 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11919 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11920 tp->dev->hard_start_xmit = tg3_start_xmit;
11922 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
11925 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11926 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
11929 tp->rx_std_max_post = TG3_RX_RING_SIZE;
11931 /* Increment the rx prod index on the rx std ring by at most
11932 * 8 for these chips to workaround hw errata.
11934 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11935 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11936 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11937 tp->rx_std_max_post = 8;
11939 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
11940 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
11941 PCIE_PWR_MGMT_L1_THRESH_MSK;
11946 #ifdef CONFIG_SPARC
/*
 * tg3_get_macaddr_sparc - obtain the MAC address from OpenFirmware (SPARC).
 *
 * Looks up the "local-mac-address" property on this PCI device's
 * device-tree node; when present and exactly 6 bytes long it is copied
 * into both dev->dev_addr and dev->perm_addr.
 *
 * NOTE(review): this listing has dropped structural lines — the function
 * braces and the success/failure return statements are not visible here,
 * and the declaration of 'len' sits on an elided line.
 */
11947 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
11949 struct net_device *dev = tp->dev;
11950 struct pci_dev *pdev = tp->pdev;
      /* Map the PCI device back to its OpenFirmware device-tree node. */
11951 struct device_node *dp = pci_device_to_OF_node(pdev);
11952 const unsigned char *addr;
11955 addr = of_get_property(dp, "local-mac-address", &len);
      /* Only accept a well-formed 6-byte Ethernet address property. */
11956 if (addr && len == 6) {
11957 memcpy(dev->dev_addr, addr, 6);
      /* Firmware-provided address is also the permanent address. */
11958 memcpy(dev->perm_addr, dev->dev_addr, 6);
/*
 * tg3_get_default_macaddr_sparc - last-resort MAC source on SPARC.
 *
 * Copies the machine-wide Ethernet address from the system IDPROM into
 * dev->dev_addr and dev->perm_addr.  (Return statement elided in this
 * listing.)
 */
11964 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
11966 struct net_device *dev = tp->dev;
11968 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
11969 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
/*
 * tg3_get_device_address - determine the NIC's MAC address at probe time.
 *
 * Sources are tried in order: OpenFirmware property (SPARC builds only),
 * the SRAM MAC-address mailbox, NVRAM at 'mac_offset', and finally the
 * MAC_ADDR_0_HIGH/LOW hardware registers.  If nothing yields a valid
 * address, SPARC builds fall back to the IDPROM address.
 *
 * NOTE(review): this listing has dropped structural lines (braces,
 * else-branches, returns, and the assignment of the default mac_offset),
 * so the comments below describe only the statements that are visible.
 */
11974 static int __devinit tg3_get_device_address(struct tg3 *tp)
11976 struct net_device *dev = tp->dev;
11977 u32 hi, lo, mac_offset;
11980 #ifdef CONFIG_SPARC
      /* A firmware-supplied address wins outright when available. */
11981 if (!tg3_get_macaddr_sparc(tp))
      /* 5704 and 5780-class parts are dual-MAC; the second function's
       * address lives at a different NVRAM offset (selection elided).
       */
11986 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11987 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11988 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
      /* Reset the NVRAM interface while holding the NVRAM arbitration lock. */
11990 if (tg3_nvram_lock(tp))
11991 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
11993 tg3_nvram_unlock(tp);
11995 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11998 /* First try to get it from MAC address mailbox. */
11999 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
      /* 0x484b is ASCII "HK" — presumably a bootcode signature marking
       * the mailbox contents as valid; TODO confirm against bootcode.
       */
12000 if ((hi >> 16) == 0x484b) {
12001 dev->dev_addr[0] = (hi >> 8) & 0xff;
12002 dev->dev_addr[1] = (hi >> 0) & 0xff;
12004 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
12005 dev->dev_addr[2] = (lo >> 24) & 0xff;
12006 dev->dev_addr[3] = (lo >> 16) & 0xff;
12007 dev->dev_addr[4] = (lo >> 8) & 0xff;
12008 dev->dev_addr[5] = (lo >> 0) & 0xff;
12010 /* Some old bootcode may report a 0 MAC address in SRAM */
12011 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
12014 /* Next, try NVRAM. */
      /* NVRAM stores the halves in a different byte order than the
       * mailbox: 'hi' bytes come out reversed, 'lo' bytes ascending.
       */
12015 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
12016 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
12017 dev->dev_addr[0] = ((hi >> 16) & 0xff);
12018 dev->dev_addr[1] = ((hi >> 24) & 0xff);
12019 dev->dev_addr[2] = ((lo >> 0) & 0xff);
12020 dev->dev_addr[3] = ((lo >> 8) & 0xff);
12021 dev->dev_addr[4] = ((lo >> 16) & 0xff);
12022 dev->dev_addr[5] = ((lo >> 24) & 0xff);
12024 /* Finally just fetch it out of the MAC control regs. */
12026 hi = tr32(MAC_ADDR_0_HIGH);
12027 lo = tr32(MAC_ADDR_0_LOW);
12029 dev->dev_addr[5] = lo & 0xff;
12030 dev->dev_addr[4] = (lo >> 8) & 0xff;
12031 dev->dev_addr[3] = (lo >> 16) & 0xff;
12032 dev->dev_addr[2] = (lo >> 24) & 0xff;
12033 dev->dev_addr[1] = hi & 0xff;
12034 dev->dev_addr[0] = (hi >> 8) & 0xff;
      /* Nothing gave a usable address. */
12038 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
12039 #ifdef CONFIG_SPARC
12040 if (!tg3_get_default_macaddr_sparc(tp))
      /* Whatever we settled on also becomes the permanent address. */
12045 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
/*
 * tg3_calc_dma_bndry - fold DMA read/write boundary bits into 'val'
 * (the DMA_RW_CTRL register image) based on the PCI cache line size,
 * the bus type (PCI / PCI-X / PCI Express) and the host architecture.
 *
 * 'goal' selects between stopping bursts at every cache line
 * (BOUNDARY_SINGLE_CACHELINE) or at multiples of it
 * (BOUNDARY_MULTI_CACHELINE); which architectures pick which is set by
 * the #if blocks below.
 *
 * NOTE(review): this listing has dropped structural lines — the 'byte'
 * and 'goal' declarations, the switch case labels/breaks, the default
 * 'goal = 0' path and the final return are not visible here.
 */
12049 #define BOUNDARY_SINGLE_CACHELINE 1
12050 #define BOUNDARY_MULTI_CACHELINE 2
12052 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12054 int cacheline_size;
12058 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
12060 cacheline_size = 1024;
      /* PCI_CACHE_LINE_SIZE is in 32-bit words; convert to bytes. */
12062 cacheline_size = (int) byte * 4;
12064 /* On 5703 and later chips, the boundary bits have no
12067 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12068 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12069 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12072 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12073 goal = BOUNDARY_MULTI_CACHELINE;
12075 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12076 goal = BOUNDARY_SINGLE_CACHELINE;
12085 /* PCI controllers on most RISC systems tend to disconnect
12086 * when a device tries to burst across a cache-line boundary.
12087 * Therefore, letting tg3 do so just wastes PCI bandwidth.
12089 * Unfortunately, for PCI-E there are only limited
12090 * write-side controls for this, and thus for reads
12091 * we will still get the disconnects. We'll also waste
12092 * these PCI cycles for both read and write for chips
12093 * other than 5700 and 5701 which do not implement the
      /* --- PCI-X mode: boundary encodings differ from plain PCI --- */
12096 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12097 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12098 switch (cacheline_size) {
12103 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12104 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12105 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12107 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12108 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12113 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12114 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12118 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12119 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
      /* --- PCI Express: only write boundaries can be controlled --- */
12122 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12123 switch (cacheline_size) {
12127 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12128 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12129 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12135 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12136 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
      /* --- conventional PCI: full read+write boundary control --- */
12140 switch (cacheline_size) {
12142 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12143 val |= (DMA_RWCTRL_READ_BNDRY_16 |
12144 DMA_RWCTRL_WRITE_BNDRY_16);
12149 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12150 val |= (DMA_RWCTRL_READ_BNDRY_32 |
12151 DMA_RWCTRL_WRITE_BNDRY_32);
12156 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12157 val |= (DMA_RWCTRL_READ_BNDRY_64 |
12158 DMA_RWCTRL_WRITE_BNDRY_64);
12163 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12164 val |= (DMA_RWCTRL_READ_BNDRY_128 |
12165 DMA_RWCTRL_WRITE_BNDRY_128);
12170 val |= (DMA_RWCTRL_READ_BNDRY_256 |
12171 DMA_RWCTRL_WRITE_BNDRY_256);
12174 val |= (DMA_RWCTRL_READ_BNDRY_512 |
12175 DMA_RWCTRL_WRITE_BNDRY_512);
12179 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12180 DMA_RWCTRL_WRITE_BNDRY_1024);
/*
 * tg3_do_test_dma - run one test DMA transaction through the chip.
 *
 * Builds an internal buffer descriptor pointing at the host buffer
 * (buf/buf_dma, 'size' bytes) and at NIC internal mbuf memory 0x2100,
 * writes the descriptor into the SRAM DMA descriptor pool through the
 * PCI memory window, kicks either the read-DMA engine (to_device != 0:
 * host memory -> chip) or the write-DMA engine (chip -> host memory),
 * and then polls the matching completion FIFO.
 *
 * NOTE(review): this listing has dropped structural lines — the 'i'/'val'
 * and return-value declarations, the if/else around to_device, the udelay
 * in the poll loop, the break, and the final return are not visible.
 */
12189 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
12191 struct tg3_internal_buffer_desc test_desc;
12192 u32 sram_dma_descs;
12195 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
      /* Quiesce the DMA engines and completion FIFOs before the test. */
12197 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
12198 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
12199 tw32(RDMAC_STATUS, 0);
12200 tw32(WDMAC_STATUS, 0);
12202 tw32(BUFMGR_MODE, 0);
12203 tw32(FTQ_RESET, 0);
      /* Descriptor: host DMA address split into hi/lo 32-bit halves,
       * chip-side target fixed at internal mbuf offset 0x2100.
       */
12205 test_desc.addr_hi = ((u64) buf_dma) >> 32;
12206 test_desc.addr_lo = buf_dma & 0xffffffff;
12207 test_desc.nic_mbuf = 0x00002100;
12208 test_desc.len = size;
12211 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
12212 * the *second* time the tg3 driver was getting loaded after an
12215 * Broadcom tells me:
12216 * ...the DMA engine is connected to the GRC block and a DMA
12217 * reset may affect the GRC block in some unpredictable way...
12218 * The behavior of resets to individual blocks has not been tested.
12220 * Broadcom noted the GRC reset will also reset all sub-components.
      /* Read-DMA path (host -> device): completion queue 13, submit queue 2. */
12223 test_desc.cqid_sqid = (13 << 8) | 2;
12225 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
      /* Write-DMA path (device -> host): completion queue 16, submit queue 7. */
12228 test_desc.cqid_sqid = (16 << 8) | 7;
12230 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
12233 test_desc.flags = 0x00000005;
      /* Copy the descriptor word-by-word into SRAM via the PCI memory
       * window (config-space TG3PCI_MEM_WIN_BASE_ADDR/DATA pair).
       */
12235 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
12238 val = *(((u32 *)&test_desc) + i);
12239 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
12240 sram_dma_descs + (i * sizeof(u32)));
12241 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
12243 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
      /* Enqueue the descriptor address to the appropriate DMA FTQ. */
12246 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
12248 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
      /* Poll the completion FIFO up to 40 times for our descriptor. */
12252 for (i = 0; i < 40; i++) {
12256 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
12258 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
12259 if ((val & 0xffff) == sram_dma_descs) {
/*
 * tg3_test_dma - tune tp->dma_rwctrl and verify DMA works on this host.
 *
 * Allocates a coherent 8 KB test buffer, derives chip/bus-specific
 * watermark and command bits for DMA_RW_CTRL, then (on 5700/5701 only)
 * performs a write-then-read DMA loop at maximum write burst size to
 * expose the known 5700/5701 write-DMA bug, tightening the write
 * boundary to 16 bytes if corruption is observed.
 *
 * NOTE(review): this listing has dropped structural lines — declarations
 * of i/p/val/ret, the allocation failure check, goto labels/error exits,
 * the retry 'goto' after adjusting the boundary, and the buffer fill
 * loop body are not visible here.
 */
12270 #define TEST_BUFFER_SIZE 0x2000
12272 static int __devinit tg3_test_dma(struct tg3 *tp)
12274 dma_addr_t buf_dma;
12275 u32 *buf, saved_dma_rwctrl;
12278 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
      /* Baseline PCI write/read command codes for DMA_RW_CTRL. */
12284 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12285 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12287 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
12289 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12290 /* DMA read watermark not used on PCIE */
12291 tp->dma_rwctrl |= 0x00180000;
12292 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
12293 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
12294 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
12295 tp->dma_rwctrl |= 0x003f0000;
12297 tp->dma_rwctrl |= 0x003f000f;
      /* PCI-X watermark tuning for 5703/5704. */
12299 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12300 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
12301 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
12302 u32 read_water = 0x7;
12304 /* If the 5704 is behind the EPB bridge, we can
12305 * do the less restrictive ONE_DMA workaround for
12306 * better performance.
12308 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
12309 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12310 tp->dma_rwctrl |= 0x8000;
12311 else if (ccval == 0x6 || ccval == 0x7)
12312 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
12314 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
12316 /* Set bit 23 to enable PCIX hw bug fix */
12318 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
12319 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
12321 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
12322 /* 5780 always in PCIX mode */
12323 tp->dma_rwctrl |= 0x00144000;
12324 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12325 /* 5714 always in PCIX mode */
12326 tp->dma_rwctrl |= 0x00148000;
12328 tp->dma_rwctrl |= 0x001b000f;
      /* 5703/5704: clear the low nibble (boundary bits repurposed). */
12332 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12333 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12334 tp->dma_rwctrl &= 0xfffffff0;
12336 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12337 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
12338 /* Remove this if it causes problems for some boards. */
12339 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
12341 /* On 5700/5701 chips, we need to set this bit.
12342 * Otherwise the chip will issue cacheline transactions
12343 * to streamable DMA memory with not all the byte
12344 * enables turned on. This is an error on several
12345 * RISC PCI controllers, in particular sparc64.
12347 * On 5703/5704 chips, this bit has been reassigned
12348 * a different meaning. In particular, it is used
12349 * on those chips to enable a PCI-X workaround.
12351 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
12354 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12357 /* Unneeded, already done by tg3_get_invariants. */
12358 tg3_switch_clocks(tp);
      /* Only 5700/5701 need the actual DMA loop test below. */
12362 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12363 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
12366 /* It is best to perform DMA test with maximum write burst size
12367 * to expose the 5700/5701 write DMA bug.
12369 saved_dma_rwctrl = tp->dma_rwctrl;
12370 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12371 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
      /* Fill the test buffer with a known pattern (body elided). */
12376 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
12379 /* Send the buffer to the chip. */
12380 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
12382 printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
12387 /* validate data reached card RAM correctly. */
12388 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12390 tg3_read_mem(tp, 0x2100 + (i*4), &val);
12391 if (le32_to_cpu(val) != p[i]) {
12392 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
12393 /* ret = -ENODEV here? */
12398 /* Now read it back. */
12399 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
12401 printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
      /* Compare read-back data; on mismatch, tighten the write boundary
       * to 16 bytes and (on an elided path) retry the whole test.
       */
12407 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12411 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12412 DMA_RWCTRL_WRITE_BNDRY_16) {
12413 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12414 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12415 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12418 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
      /* Loop completed without mismatch: the test passed. */
12424 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
12430 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12431 DMA_RWCTRL_WRITE_BNDRY_16) {
12432 static struct pci_device_id dma_wait_state_chipsets[] = {
12433 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
12434 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
12438 /* DMA test passed without adjusting DMA boundary,
12439 * now look for chipsets that are known to expose the
12440 * DMA bug without failing the test.
12442 if (pci_dev_present(dma_wait_state_chipsets)) {
12443 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12444 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12447 /* Safe to use the calculated DMA boundary. */
12448 tp->dma_rwctrl = saved_dma_rwctrl;
12450 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12454 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
12459 static void __devinit tg3_init_link_config(struct tg3 *tp)
12461 tp->link_config.advertising =
12462 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12463 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12464 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
12465 ADVERTISED_Autoneg | ADVERTISED_MII);
12466 tp->link_config.speed = SPEED_INVALID;
12467 tp->link_config.duplex = DUPLEX_INVALID;
12468 tp->link_config.autoneg = AUTONEG_ENABLE;
12469 tp->link_config.active_speed = SPEED_INVALID;
12470 tp->link_config.active_duplex = DUPLEX_INVALID;
12471 tp->link_config.phy_is_low_power = 0;
12472 tp->link_config.orig_speed = SPEED_INVALID;
12473 tp->link_config.orig_duplex = DUPLEX_INVALID;
12474 tp->link_config.orig_autoneg = AUTONEG_INVALID;
/*
 * tg3_init_bufmgr_config - choose buffer-manager watermark defaults.
 *
 * 5705-plus chips use the reduced *_5705 mbuf watermarks (with a further
 * override for the 5906); older chips use the original defaults plus the
 * jumbo-frame variants.  The DMA low/high watermarks are common to all.
 *
 * NOTE(review): this listing has dropped the closing-brace/else lines,
 * so the 5780-class jumbo branch and the legacy branch boundaries are
 * not visible here.
 */
12477 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
12479 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12480 tp->bufmgr_config.mbuf_read_dma_low_water =
12481 DEFAULT_MB_RDMA_LOW_WATER_5705;
12482 tp->bufmgr_config.mbuf_mac_rx_low_water =
12483 DEFAULT_MB_MACRX_LOW_WATER_5705;
12484 tp->bufmgr_config.mbuf_high_water =
12485 DEFAULT_MB_HIGH_WATER_5705;
      /* 5906 has even smaller internal memory; override two values. */
12486 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12487 tp->bufmgr_config.mbuf_mac_rx_low_water =
12488 DEFAULT_MB_MACRX_LOW_WATER_5906;
12489 tp->bufmgr_config.mbuf_high_water =
12490 DEFAULT_MB_HIGH_WATER_5906;
      /* Jumbo-frame watermarks for the 5780-class parts. */
12493 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12494 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
12495 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12496 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
12497 tp->bufmgr_config.mbuf_high_water_jumbo =
12498 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
      /* Legacy (pre-5705) defaults, standard and jumbo. */
12500 tp->bufmgr_config.mbuf_read_dma_low_water =
12501 DEFAULT_MB_RDMA_LOW_WATER;
12502 tp->bufmgr_config.mbuf_mac_rx_low_water =
12503 DEFAULT_MB_MACRX_LOW_WATER;
12504 tp->bufmgr_config.mbuf_high_water =
12505 DEFAULT_MB_HIGH_WATER;
12507 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12508 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
12509 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12510 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
12511 tp->bufmgr_config.mbuf_high_water_jumbo =
12512 DEFAULT_MB_HIGH_WATER_JUMBO;
      /* DMA descriptor watermarks are chip-independent. */
12515 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
12516 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
12519 static char * __devinit tg3_phy_string(struct tg3 *tp)
12521 switch (tp->phy_id & PHY_ID_MASK) {
12522 case PHY_ID_BCM5400: return "5400";
12523 case PHY_ID_BCM5401: return "5401";
12524 case PHY_ID_BCM5411: return "5411";
12525 case PHY_ID_BCM5701: return "5701";
12526 case PHY_ID_BCM5703: return "5703";
12527 case PHY_ID_BCM5704: return "5704";
12528 case PHY_ID_BCM5705: return "5705";
12529 case PHY_ID_BCM5750: return "5750";
12530 case PHY_ID_BCM5752: return "5752";
12531 case PHY_ID_BCM5714: return "5714";
12532 case PHY_ID_BCM5780: return "5780";
12533 case PHY_ID_BCM5755: return "5755";
12534 case PHY_ID_BCM5787: return "5787";
12535 case PHY_ID_BCM5784: return "5784";
12536 case PHY_ID_BCM5756: return "5722/5756";
12537 case PHY_ID_BCM5906: return "5906";
12538 case PHY_ID_BCM5761: return "5761";
12539 case PHY_ID_BCM8002: return "8002/serdes";
12540 case 0: return "serdes";
12541 default: return "unknown";
/*
 * tg3_bus_string - format a bus description ("PCI Express",
 * "PCIX:133MHz", "PCI:33MHz:64-bit", ...) into caller-supplied 'str'.
 *
 * For PCI-X, the speed is decoded from the low 5 bits of
 * TG3PCI_CLOCK_CTRL (with the 5704CIOBE board forced to 133MHz); for
 * plain PCI it comes from TG3_FLAG_PCI_HIGH_SPEED, and the bus width
 * suffix from TG3_FLAG_PCI_32BIT.
 *
 * NOTE(review): structural lines are elided in this listing — the braces,
 * the 'else' before the plain-PCI branch, and the 'return str' are not
 * visible.  The caller must supply a buffer large enough for the longest
 * string produced here.
 */
12545 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
12547 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12548 strcpy(str, "PCI Express");
12550 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12551 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
12553 strcpy(str, "PCIX:");
12555 if ((clock_ctrl == 7) ||
12556 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
12557 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
12558 strcat(str, "133MHz");
12559 else if (clock_ctrl == 0)
12560 strcat(str, "33MHz");
12561 else if (clock_ctrl == 2)
12562 strcat(str, "50MHz");
12563 else if (clock_ctrl == 4)
12564 strcat(str, "66MHz");
12565 else if (clock_ctrl == 6)
12566 strcat(str, "100MHz");
      /* Plain PCI (else-branch line elided above). */
12568 strcpy(str, "PCI:");
12569 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
12570 strcat(str, "66MHz");
12572 strcat(str, "33MHz");
      /* Width suffix applies to PCI and PCI-X alike. */
12574 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
12575 strcat(str, ":32-bit");
12577 strcat(str, ":64-bit");
/* tg3_find_peer() - for dual-port devices (e.g. 5704), locate the
 * sibling PCI function sharing this device's slot.
 * NOTE(review): the tail of this function (single-port fallback and
 * refcount handling described by the comments below) is missing from
 * this extract.
 */
12581 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
12583 struct pci_dev *peer;
/* devfn with the function bits masked off = base of this slot. */
12584 unsigned int func, devnr = tp->pdev->devfn & ~7;
/* Scan all 8 possible functions of the slot for a device other
 * than ourselves. */
12586 for (func = 0; func < 8; func++) {
12587 peer = pci_get_slot(tp->pdev->bus, devnr | func);
12588 if (peer && peer != tp->pdev)
12592 /* 5704 can be configured in single-port mode, set peer to
12593 * tp->pdev in that case.
12601 * We don't need to keep the refcount elevated; there's no way
12602 * to remove one half of this device without removing the other
/* tg3_init_coal() - fill tp->coal with the driver's default interrupt
 * coalescing parameters, adjusted for this chip's host-coalescing
 * capabilities.  The result is what ethtool -c reports initially.
 */
12609 static void __devinit tg3_init_coal(struct tg3 *tp)
12611 struct ethtool_coalesce *ec = &tp->coal;
/* Start from a zeroed struct and apply the baseline defaults. */
12613 memset(ec, 0, sizeof(*ec));
12614 ec->cmd = ETHTOOL_GCOALESCE;
12615 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
12616 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
12617 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
12618 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
12619 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
12620 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
12621 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
12622 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
12623 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
/* Chips running with clear-ticks-on-BD modes need the alternate
 * CLRTCKS tick defaults instead. */
12625 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
12626 HOSTCC_MODE_CLRTICK_TXBD)) {
12627 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
12628 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
12629 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
12630 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
/* 5705 and newer: zero the in-irq coalescing and stats-block tick
 * parameters (not used on these chips). */
12633 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12634 ec->rx_coalesce_usecs_irq = 0;
12635 ec->tx_coalesce_usecs_irq = 0;
12636 ec->stats_block_coalesce_usecs = 0;
/*
 * tg3_init_one() - PCI probe entry point.  Enables and maps the device,
 * allocates the net_device, selects DMA masks, configures offload
 * feature flags, resets/tests the chip and registers the netdev.
 * Returns 0 on success or a negative errno, unwinding the acquired
 * resources through the err_out_* labels on failure.
 *
 * NOTE(review): this extract is missing a number of source lines
 * (braces, "if (err)" checks, else branches, labels); the comments
 * below describe only the statements that are visible.
 */
12640 static int __devinit tg3_init_one(struct pci_dev *pdev,
12641 const struct pci_device_id *ent)
/* Guards the one-time driver version banner across probes. */
12643 static int tg3_version_printed = 0;
12644 resource_size_t tg3reg_base;
12645 unsigned long tg3reg_len;
12646 struct net_device *dev;
12650 u64 dma_mask, persist_dma_mask;
12651 DECLARE_MAC_BUF(mac);
/* Print the driver version once, on the first device probed. */
12653 if (tg3_version_printed++ == 0)
12654 printk(KERN_INFO "%s", version);
/* Wake the device and sanity-check that BAR 0 is a memory BAR. */
12656 err = pci_enable_device(pdev);
12658 printk(KERN_ERR PFX "Cannot enable PCI device, "
12663 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12664 printk(KERN_ERR PFX "Cannot find proper PCI device "
12665 "base address, aborting.\n");
12667 goto err_out_disable_pdev;
/* Claim the device's resource regions and enable bus mastering. */
12670 err = pci_request_regions(pdev, DRV_MODULE_NAME);
12672 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
12674 goto err_out_disable_pdev;
12677 pci_set_master(pdev);
12679 /* Find power-management capability. */
12680 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12682 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
12685 goto err_out_free_res;
/* Locate BAR 0 (register window) and allocate the net_device with
 * room for the private struct tg3. */
12688 tg3reg_base = pci_resource_start(pdev, 0);
12689 tg3reg_len = pci_resource_len(pdev, 0);
12691 dev = alloc_etherdev(sizeof(*tp));
12693 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
12695 goto err_out_free_res;
12698 SET_NETDEV_DEV(dev, &pdev->dev);
/* Advertise hardware VLAN tag insert/strip when 802.1Q is built in. */
12700 #if TG3_VLAN_TAG_USED
12701 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
12702 dev->vlan_rx_register = tg3_vlan_rx_register;
/* Initialize the private state with the driver defaults. */
12705 tp = netdev_priv(dev);
12708 tp->pm_cap = pm_cap;
12709 tp->mac_mode = TG3_DEF_MAC_MODE;
12710 tp->rx_mode = TG3_DEF_RX_MODE;
12711 tp->tx_mode = TG3_DEF_TX_MODE;
/* Message level: module parameter if set, otherwise the default. */
12714 tp->msg_enable = tg3_debug;
12716 tp->msg_enable = TG3_DEF_MSG_ENABLE;
12718 /* The word/byte swap controls here control register access byte
12719 * swapping. DMA data byte swapping is controlled in the GRC_MODE
12722 tp->misc_host_ctrl =
12723 MISC_HOST_CTRL_MASK_PCI_INT |
12724 MISC_HOST_CTRL_WORD_SWAP |
12725 MISC_HOST_CTRL_INDIR_ACCESS |
12726 MISC_HOST_CTRL_PCISTATE_RW;
12728 /* The NONFRM (non-frame) byte/word swap controls take effect
12729 * on descriptor entries, anything which isn't packet data.
12731 * The StrongARM chips on the board (one for tx, one for rx)
12732 * are running in big-endian mode.
12734 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
12735 GRC_MODE_WSWAP_NONFRM_DATA);
12736 #ifdef __BIG_ENDIAN
12737 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
/* Locks and the deferred-reset work item used by the tx-timeout path. */
12739 spin_lock_init(&tp->lock);
12740 spin_lock_init(&tp->indirect_lock);
12741 INIT_WORK(&tp->reset_task, tg3_reset_task);
/* Map the register window uncached. */
12743 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
12745 printk(KERN_ERR PFX "Cannot map device registers, "
12748 goto err_out_free_dev;
12751 tg3_init_link_config(tp);
/* Default ring sizes. */
12753 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
12754 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
12755 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
/* Hook up net_device operations (pre-net_device_ops era kernel). */
12757 dev->open = tg3_open;
12758 dev->stop = tg3_close;
12759 dev->get_stats = tg3_get_stats;
12760 dev->set_multicast_list = tg3_set_rx_mode;
12761 dev->set_mac_address = tg3_set_mac_addr;
12762 dev->do_ioctl = tg3_ioctl;
12763 dev->tx_timeout = tg3_tx_timeout;
12764 netif_napi_add(dev, &tp->napi, tg3_poll, 64);
12765 dev->ethtool_ops = &tg3_ethtool_ops;
12766 dev->watchdog_timeo = TG3_TX_TIMEOUT;
12767 dev->change_mtu = tg3_change_mtu;
12768 dev->irq = pdev->irq;
12769 #ifdef CONFIG_NET_POLL_CONTROLLER
12770 dev->poll_controller = tg3_poll_controller;
/* Read chip revision, capability flags and workarounds from hardware. */
12773 err = tg3_get_invariants(tp);
12775 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
12777 goto err_out_iounmap;
12780 /* The EPB bridge inside 5714, 5715, and 5780 and any
12781 * device behind the EPB cannot support DMA addresses > 40-bit.
12782 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
12783 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
12784 * do DMA address check in tg3_start_xmit().
12786 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
12787 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
12788 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
12789 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
12790 #ifdef CONFIG_HIGHMEM
12791 dma_mask = DMA_64BIT_MASK;
12794 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
12796 /* Configure DMA attributes. */
12797 if (dma_mask > DMA_32BIT_MASK) {
12798 err = pci_set_dma_mask(pdev, dma_mask);
12800 dev->features |= NETIF_F_HIGHDMA;
12801 err = pci_set_consistent_dma_mask(pdev,
12804 printk(KERN_ERR PFX "Unable to obtain 64 bit "
12805 "DMA for consistent allocations\n");
12806 goto err_out_iounmap;
/* Fall back to a plain 32-bit mask if the wide mask was refused. */
12810 if (err || dma_mask == DMA_32BIT_MASK) {
12811 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
12813 printk(KERN_ERR PFX "No usable DMA configuration, "
12815 goto err_out_iounmap;
12819 tg3_init_bufmgr_config(tp);
/* Decide TSO capability: HW-TSO chips are always capable; 5700/5701,
 * 5705 A0, 5906, or chips with ASF firmware active get it cleared;
 * everything else uses firmware TSO with the TSO_BUG workaround. */
12821 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12822 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
12824 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12825 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12826 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
12827 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12828 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
12829 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
12831 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
12834 /* TSO is on by default on chips that support hardware TSO.
12835 * Firmware TSO on older chips gives lower performance, so it
12836 * is off by default, but can be enabled using ethtool.
12838 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12839 dev->features |= NETIF_F_TSO;
12840 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
12841 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
12842 dev->features |= NETIF_F_TSO6;
12843 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12844 dev->features |= NETIF_F_TSO_ECN;
/* 5705 A1 without TSO on a slow bus: cap the rx ring at 63 entries. */
12848 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
12849 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
12850 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
12851 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
12852 tp->rx_pending = 63;
/* Fetch the permanent MAC address from NVRAM/firmware. */
12855 err = tg3_get_device_address(tp);
12857 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
12859 goto err_out_iounmap;
/* Map BAR 2 for the APE (management processor) register window when
 * the chip has one, and initialize the APE hardware locks. */
12862 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12863 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12864 printk(KERN_ERR PFX "Cannot find proper PCI device "
12865 "base address for APE, aborting.\n");
12867 goto err_out_iounmap;
12870 tg3reg_base = pci_resource_start(pdev, 2);
12871 tg3reg_len = pci_resource_len(pdev, 2);
12873 tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
12874 if (!tp->aperegs) {
12875 printk(KERN_ERR PFX "Cannot map APE registers, "
12878 goto err_out_iounmap;
12881 tg3_ape_lock_init(tp);
12885 * Reset chip in case UNDI or EFI driver did not shutdown
12886 * DMA self test will enable WDMAC and we'll see (spurious)
12887 * pending DMA on the PCI bus at that point.
12889 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
12890 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
12891 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
12892 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
/* Run the DMA self-test before committing to this configuration. */
12895 err = tg3_test_dma(tp);
12897 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
12898 goto err_out_apeunmap;
12901 /* Tigon3 can do ipv4 only... and some chips have buggy
/* Enable checksum offload unless this chip's checksumming is known
 * broken; 5755/5787/5784/5761 also handle IPv6 checksums. */
12904 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
12905 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
12906 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12907 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12908 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12909 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12910 dev->features |= NETIF_F_IPV6_CSUM;
12912 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
12914 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
12916 /* flow control autonegotiation is default behavior */
12917 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
12918 tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
/* Publish the net_device and register it with the network stack. */
12922 pci_set_drvdata(pdev, dev);
12924 err = register_netdev(dev);
12926 printk(KERN_ERR PFX "Cannot register net device, "
12928 goto err_out_apeunmap;
/* Probe succeeded: print the identification banners. */
12931 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] "
12932 "(%s) %s Ethernet %s\n",
12934 tp->board_part_number,
12935 tp->pci_chip_rev_id,
12936 tg3_phy_string(tp),
12937 tg3_bus_string(tp, str),
12938 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
12939 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
12940 "10/100/1000Base-T")),
12941 print_mac(mac, dev->dev_addr));
12943 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
12944 "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
12946 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
12947 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
12948 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
12949 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
12950 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
12951 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
12952 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
12953 dev->name, tp->dma_rwctrl,
12954 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
12955 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
/* Error unwind: release resources in reverse order of acquisition. */
12961 iounmap(tp->aperegs);
12962 tp->aperegs = NULL;
12975 pci_release_regions(pdev);
12977 err_out_disable_pdev:
12978 pci_disable_device(pdev);
12979 pci_set_drvdata(pdev, NULL);
/* tg3_remove_one() - PCI remove callback: flush deferred work,
 * unregister the netdev and release the resources taken by probe
 * (APE mapping, PCI regions, device enable, drvdata). */
12983 static void __devexit tg3_remove_one(struct pci_dev *pdev)
12985 struct net_device *dev = pci_get_drvdata(pdev);
12988 struct tg3 *tp = netdev_priv(dev);
/* Make sure the deferred reset_task is not running before teardown. */
12990 flush_scheduled_work();
12991 unregister_netdev(dev);
/* Unmap the APE register window if one was mapped at probe time. */
12993 iounmap(tp->aperegs);
12994 tp->aperegs = NULL;
13001 pci_release_regions(pdev);
13002 pci_disable_device(pdev);
13003 pci_set_drvdata(pdev, NULL);
/* tg3_suspend() - legacy PCI suspend callback.  Quiesces the
 * interface, halts the chip and drops it into the requested
 * low-power state.  If changing the power state fails, the hardware
 * is restarted and the interface re-attached so the device keeps
 * working. */
13007 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13009 struct net_device *dev = pci_get_drvdata(pdev);
13010 struct tg3 *tp = netdev_priv(dev);
13013 /* PCI register 4 needs to be saved whether netif_running() or not.
13014 * MSI address and data need to be saved if using MSI and
13017 pci_save_state(pdev);
/* Nothing further to quiesce if the interface is down. */
13019 if (!netif_running(dev))
/* Quiesce: finish any deferred reset work, stop NAPI/queues and
 * the periodic timer, then mask interrupts. */
13022 flush_scheduled_work();
13023 tg3_netif_stop(tp);
13025 del_timer_sync(&tp->timer);
13027 tg3_full_lock(tp, 1);
13028 tg3_disable_ints(tp);
13029 tg3_full_unlock(tp);
13031 netif_device_detach(dev);
/* Halt the chip; clearing INIT_COMPLETE tells later code a full
 * re-initialization is required. */
13033 tg3_full_lock(tp, 0);
13034 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13035 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
13036 tg3_full_unlock(tp);
13038 err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
/* Failure path: undo the shutdown - restart the hardware, timer,
 * and network interface. */
13040 tg3_full_lock(tp, 0);
13042 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13043 if (tg3_restart_hw(tp, 1))
13046 tp->timer.expires = jiffies + tp->timer_offset;
13047 add_timer(&tp->timer);
13049 netif_device_attach(dev);
13050 tg3_netif_start(tp);
13053 tg3_full_unlock(tp);
/* tg3_resume() - legacy PCI resume callback: restore PCI config
 * space, bring the chip back to full power (D0), re-initialize the
 * hardware and restart the timer and the network interface. */
13059 static int tg3_resume(struct pci_dev *pdev)
13061 struct net_device *dev = pci_get_drvdata(pdev);
13062 struct tg3 *tp = netdev_priv(dev);
13065 pci_restore_state(tp->pdev);
/* Nothing to restart if the interface was down at suspend time. */
13067 if (!netif_running(dev))
13070 err = tg3_set_power_state(tp, PCI_D0);
13074 netif_device_attach(dev);
/* Re-init the hardware under the full lock and mark init complete. */
13076 tg3_full_lock(tp, 0);
13078 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13079 err = tg3_restart_hw(tp, 1);
/* Re-arm the periodic timer and restart NAPI/tx queues. */
13083 tp->timer.expires = jiffies + tp->timer_offset;
13084 add_timer(&tp->timer);
13086 tg3_netif_start(tp);
13089 tg3_full_unlock(tp);
/* PCI driver glue: binds the probe/remove/suspend/resume callbacks
 * above to the tg3 PCI device-id table. */
13094 static struct pci_driver tg3_driver = {
13095 .name = DRV_MODULE_NAME,
13096 .id_table = tg3_pci_tbl,
13097 .probe = tg3_init_one,
13098 .remove = __devexit_p(tg3_remove_one),
13099 .suspend = tg3_suspend,
13100 .resume = tg3_resume
/* Module entry/exit points: register and unregister the PCI driver. */
13103 static int __init tg3_init(void)
13105 return pci_register_driver(&tg3_driver);
13108 static void __exit tg3_cleanup(void)
13110 pci_unregister_driver(&tg3_driver);
13113 module_init(tg3_init);
13114 module_exit(tg3_cleanup);