/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey. It's neither supported nor endorsed
 * by NVIDIA Corp. Use at your own risk.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *		IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Changelog:
 * 0.01: 05 Oct 2003: First release that compiles without warnings.
 * 0.02: 05 Oct 2003: Fix bug for nv_drain_tx: do not try to free NULL skbs.
 *		Check all PCI BARs for the register window.
 *		udelay added to mii_rw.
 * 0.03: 06 Oct 2003: Initialize dev->irq.
 * 0.04: 07 Oct 2003: Initialize np->lock, reduce handled irqs, add printks.
 * 0.05: 09 Oct 2003: printk removed again, irq status print tx_timeout.
 * 0.06: 10 Oct 2003: MAC Address read updated, pff flag generation updated,
 * 0.07: 14 Oct 2003: Further irq mask updates.
 * 0.08: 20 Oct 2003: rx_desc.Length initialization added, nv_alloc_rx refill
 *		added into irq handler, NULL check for drain_ring.
 * 0.09: 20 Oct 2003: Basic link speed irq implementation. Only handle the
 *		requested interrupt sources.
 * 0.10: 20 Oct 2003: First cleanup for release.
 * 0.11: 21 Oct 2003: hexdump for tx added, rx buffer sizes increased.
 *		MAC Address init fix, set_multicast cleanup.
 * 0.12: 23 Oct 2003: Cleanups for release.
 * 0.13: 25 Oct 2003: Limit for concurrent tx packets increased to 10.
 *		Set link speed correctly. start rx before starting
 *		tx (nv_start_rx sets the link speed).
 * 0.14: 25 Oct 2003: Nic dependent irq mask.
 * 0.15: 08 Nov 2003: fix smp deadlock with set_multicast_list during
 * 0.16: 15 Nov 2003: include file cleanup for ppc64, rx buffer size
 *		increased to 1628 bytes.
 * 0.17: 16 Nov 2003: undo rx buffer size increase. Subtract 1 from
 * 0.18: 17 Nov 2003: fix oops due to late initialization of dev_stats
 * 0.19: 29 Nov 2003: Handle RxNoBuf, detect & handle invalid mac
 *		addresses, really stop rx if already running
 *		in nv_start_rx, clean up a bit.
 * 0.20: 07 Dec 2003: alloc fixes
 * 0.21: 12 Jan 2004: additional alloc fix, nic polling fix.
 * 0.22: 19 Jan 2004: reprogram timer to a sane rate, avoid lockup
 * 0.23: 26 Jan 2004: various small cleanups
 * 0.24: 27 Feb 2004: make driver even less anonymous in backtraces
 * 0.25: 09 Mar 2004: wol support
 * 0.26: 03 Jun 2004: netdriver specific annotation, sparse-related fixes
 * 0.27: 19 Jun 2004: Gigabit support, new descriptor rings,
 *		added CK804/MCP04 device IDs, code fixes
 *		for registers, link status and other minor fixes.
 * 0.28: 21 Jun 2004: Big cleanup, making driver mostly endian safe
 * 0.29: 31 Aug 2004: Add backup timer for link change notification.
 * 0.30: 25 Sep 2004: rx checksum support for nf 250 Gb. Add rx reset
 *		into nv_close, otherwise reenabling for wol can
 *		cause DMA to kfree'd memory.
 * 0.31: 14 Nov 2004: ethtool support for getting/setting link
 * 0.32: 16 Apr 2005: RX_ERROR4 handling added.
 * 0.33: 16 May 2005: Support for MCP51 added.
 *
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */

#define FORCEDETH_VERSION		"0.33"
#define DRV_NAME			"forcedeth"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>

#if 0				/* flip to 1 for verbose debug output */
#define dprintk			printk
#else
#define dprintk(x...)		do { } while (0)
#endif

#define DEV_NEED_LASTPACKET1	0x0001	/* set LASTPACKET1 in tx flags */
#define DEV_IRQMASK_1		0x0002	/* use NVREG_IRQMASK_WANTED_1 for irq mask */
#define DEV_IRQMASK_2		0x0004	/* use NVREG_IRQMASK_WANTED_2 for irq mask */
#define DEV_NEED_TIMERIRQ	0x0008	/* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER	0x0010	/* poll link settings. Relies on the timer irq */
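
/*
 * These flags are OR'd together into the driver_data field of the PCI
 * device table and read back by nv_probe() via id->driver_data. A purely
 * illustrative (hypothetical, not from this file) table entry could look
 * like:
 *
 *	{ .vendor = PCI_VENDOR_ID_NVIDIA, .device = PCI_DEVICE_ID_NVIDIA_NVENET_1,
 *	  .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_1|DEV_NEED_TIMERIRQ },
 */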

enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT	0x040
#define NVREG_IRQSTAT_MASK	0x1ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR	0x0001
#define NVREG_IRQ_RX		0x0002
#define NVREG_IRQ_RX_NOBUF	0x0004
#define NVREG_IRQ_TX_ERR	0x0008
#define NVREG_IRQ_TX2		0x0010
#define NVREG_IRQ_TIMER		0x0020
#define NVREG_IRQ_LINK		0x0040
#define NVREG_IRQ_TX1		0x0100
#define NVREG_IRQMASK_WANTED_1	0x005f
#define NVREG_IRQMASK_WANTED_2	0x0147
#define NVREG_IRQ_UNKNOWN	(~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR|NVREG_IRQ_TX2|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_TX1))

	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL	3

/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT	970
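/* With 97 corresponding to 1 ms, the default of 970 gives an interval of
 * roughly 10 ms, i.e. the ~100 timer interrupts per second assumed by the
 * DEV_NEED_TIMERIRQ recovery note at the top of this file. */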
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_HD		0x02
#define NVREG_MISC1_FORCE	0x3b0f3c

	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START	0x01
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY	0x01

	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_ALWAYS	0x7F0008
#define NVREG_PFF_PROMISC	0x80
#define NVREG_PFF_MYADDR	0x20

	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY	0x601
#define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START	0x01
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY	0x01

	NvRegRandomSeed = 0x9c,
#define NVREG_RNDSEED_MASK	0x00ff
#define NVREG_RNDSEED_FORCE	0x7f00
#define NVREG_RNDSEED_FORCE2	0x2d00
#define NVREG_RNDSEED_FORCE3	0x7400

	NvRegUnknownSetupReg1 = 0xA0,
#define NVREG_UNKSETUP1_VAL	0x16070f
	NvRegUnknownSetupReg2 = 0xA4,
#define NVREG_UNKSETUP2_VAL	0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE	0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
	NvRegMulticastMaskB = 0xBC,

	NvRegPhyInterface = 0xC0,
#define PHY_RGMII		0x10000000

	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT	0
#define NVREG_RINGSZ_RXSHIFT	16
	NvRegUnknownTransmitterReg = 0x10c,
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE	0x10000
#define NVREG_LINKSPEED_10	1000
#define NVREG_LINKSPEED_100	100
#define NVREG_LINKSPEED_1000	50
#define NVREG_LINKSPEED_MASK	(0xFFF)
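/* Note: the low 12 bits of NvRegLinkSpeed do not encode the speed in
 * Mbit/s. 10 Mbit uses the value 1000, 100 Mbit uses 100 and 1000 Mbit
 * uses 50; forced settings additionally OR in NVREG_LINKSPEED_FORCE. */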
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31	(1<<31)
	NvRegUnknownSetupReg3 = 0x13c,
#define NVREG_UNKSETUP3_VAL1	0x200010
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK	0x0001
#define NVREG_TXRXCTL_BIT1	0x0002
#define NVREG_TXRXCTL_BIT2	0x0004
#define NVREG_TXRXCTL_IDLE	0x0008
#define NVREG_TXRXCTL_RESET	0x0010
#define NVREG_TXRXCTL_RXCHECK	0x0400
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR		0x0001
#define NVREG_MIISTAT_LINKCHANGE	0x0008
#define NVREG_MIISTAT_MASK		0x000f
#define NVREG_MIISTAT_MASK2		0x000f
	NvRegUnknownSetupReg4 = 0x184,
#define NVREG_UNKSETUP4_VAL	8

	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START	0x02
#define NVREG_ADAPTCTL_LINKUP	0x04
#define NVREG_ADAPTCTL_PHYVALID	0x40000
#define NVREG_ADAPTCTL_RUNNING	0x100000
#define NVREG_ADAPTCTL_PHYSHIFT	24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8	(1<<8)
#define NVREG_MIIDELAY		5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE	0x08000
#define NVREG_MIICTL_WRITE	0x00400
#define NVREG_MIICTL_ADDRSHIFT	5
	NvRegMIIData = 0x194,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL		0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
#define NVREG_WAKEUPFLAGS_D3SHIFT	12
#define NVREG_WAKEUPFLAGS_D2SHIFT	8
#define NVREG_WAKEUPFLAGS_D1SHIFT	4
#define NVREG_WAKEUPFLAGS_D0SHIFT	0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
#define NVREG_WAKEUPFLAGS_ENABLE	0x1111

	NvRegPatternCRC = 0x204,
	NvRegPatternMask = 0x208,
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP	(1<<30)
#define NVREG_POWERCAP_D2SUPP	(1<<26)
#define NVREG_POWERCAP_D1SUPP	(1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP	0x8000
#define NVREG_POWERSTATE_VALID		0x0100
#define NVREG_POWERSTATE_MASK		0x0003
#define NVREG_POWERSTATE_D0		0x0000
#define NVREG_POWERSTATE_D1		0x0001
#define NVREG_POWERSTATE_D2		0x0002
#define NVREG_POWERSTATE_D3		0x0003
};

/* Big endian: should work, but is untested */
struct ring_desc {
	u32 PacketBuffer;
	u32 FlagLen;
};

#define FLAG_MASK_V1	0xffff0000
#define FLAG_MASK_V2	0xffffc000
#define LEN_MASK_V1	(0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2	(0xffffffff ^ FLAG_MASK_V2)
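/* i.e. LEN_MASK_V1 is 0x0000ffff and LEN_MASK_V2 is 0x00003fff: v1
 * descriptors carry a 16 bit length, v2 descriptors only 14 bits, with
 * the freed-up bits used for the additional status flags below. */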

#define NV_TX_LASTPACKET	(1<<16)
#define NV_TX_RETRYERROR	(1<<19)
#define NV_TX_LASTPACKET1	(1<<24)
#define NV_TX_DEFERRED		(1<<26)
#define NV_TX_CARRIERLOST	(1<<27)
#define NV_TX_LATECOLLISION	(1<<28)
#define NV_TX_UNDERFLOW		(1<<29)
#define NV_TX_ERROR		(1<<30)
#define NV_TX_VALID		(1<<31)

#define NV_TX2_LASTPACKET	(1<<29)
#define NV_TX2_RETRYERROR	(1<<18)
#define NV_TX2_LASTPACKET1	(1<<23)
#define NV_TX2_DEFERRED		(1<<25)
#define NV_TX2_CARRIERLOST	(1<<26)
#define NV_TX2_LATECOLLISION	(1<<27)
#define NV_TX2_UNDERFLOW	(1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR		(1<<30)
#define NV_TX2_VALID		(1<<31)

#define NV_RX_DESCRIPTORVALID	(1<<16)
#define NV_RX_MISSEDFRAME	(1<<17)
#define NV_RX_SUBSTRACT1	(1<<18)
#define NV_RX_ERROR1		(1<<23)
#define NV_RX_ERROR2		(1<<24)
#define NV_RX_ERROR3		(1<<25)
#define NV_RX_ERROR4		(1<<26)
#define NV_RX_CRCERR		(1<<27)
#define NV_RX_OVERFLOW		(1<<28)
#define NV_RX_FRAMINGERR	(1<<29)
#define NV_RX_ERROR		(1<<30)
#define NV_RX_AVAIL		(1<<31)

#define NV_RX2_CHECKSUMMASK	(0x1C000000)
#define NV_RX2_CHECKSUMOK1	(0x10000000)
#define NV_RX2_CHECKSUMOK2	(0x14000000)
#define NV_RX2_CHECKSUMOK3	(0x18000000)
#define NV_RX2_DESCRIPTORVALID	(1<<29)
#define NV_RX2_SUBSTRACT1	(1<<25)
#define NV_RX2_ERROR1		(1<<18)
#define NV_RX2_ERROR2		(1<<19)
#define NV_RX2_ERROR3		(1<<20)
#define NV_RX2_ERROR4		(1<<21)
#define NV_RX2_CRCERR		(1<<22)
#define NV_RX2_OVERFLOW		(1<<23)
#define NV_RX2_FRAMINGERR	(1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR		(1<<30)
#define NV_RX2_AVAIL		(1<<31)
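/* The checksum status occupies bits 26-28 (NV_RX2_CHECKSUMMASK); exactly
 * three of the possible encodings (0x10..., 0x14..., 0x18...) indicate a
 * successfully verified checksum, see the NV_RX2_CHECKSUMOK* comparisons
 * in nv_rx_process() below. */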

/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ		0x270

/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY	4
#define NV_TXSTOP_DELAY1	10
#define NV_TXSTOP_DELAY1MAX	500000
#define NV_TXSTOP_DELAY2	100
#define NV_RXSTOP_DELAY1	10
#define NV_RXSTOP_DELAY1MAX	500000
#define NV_RXSTOP_DELAY2	100
#define NV_SETUP5_DELAY		5
#define NV_SETUP5_DELAYMAX	50000
#define NV_POWERUP_DELAY	5
#define NV_POWERUP_DELAYMAX	5000
#define NV_MIIBUSY_DELAY	50
#define NV_MIIPHY_DELAY		10
#define NV_MIIPHY_DELAYMAX	10000

#define NV_WAKEUPPATTERNS	5
#define NV_WAKEUPMASKENTRIES	4

/* General driver defaults */
#define NV_WATCHDOG_TIMEO	(5*HZ)

#define RX_RING		128
#define TX_RING		64
/*
 * If your nic mysteriously hangs then try to reduce the limits
 * to 1/0: It might be required to set NV_TX_LASTPACKET in the
 * last valid ring entry. But this would be impossible to
 * implement - probably a disassembly error.
 */
#define TX_LIMIT_STOP	63
#define TX_LIMIT_START	62
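/* The two limits form a hysteresis: nv_start_xmit() stops the queue once
 * next_tx - nic_tx reaches TX_LIMIT_STOP outstanding packets, and
 * nv_tx_done() only wakes it again once the backlog has dropped below
 * TX_LIMIT_START. */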

/* rx/tx mac addr + type + vlan + align + slack*/
#define RX_NIC_BUFSIZE		(ETH_DATA_LEN + 64)
/* even more slack */
#define RX_ALLOC_BUFSIZE	(ETH_DATA_LEN + 128)

#define OOM_REFILL	(1+HZ/20)
#define POLL_WAIT	(1+HZ/100)
#define LINK_TIMEOUT	(3*HZ)

/*
 * desc_ver: This field has two purposes:
 * - Newer nics use a different ring layout. The layout is selected by
 *   comparing np->desc_ver with DESC_VER_xy.
 * - It contains bits that are forced on when writing to NvRegTxRxControl.
 */
#define DESC_VER_1	0x0
#define DESC_VER_2	(0x02100|NVREG_TXRXCTL_RXCHECK)
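/* Because np->desc_ver is OR'd into every NvRegTxRxControl write (e.g.
 * writel(NVREG_TXRXCTL_KICK|np->desc_ver, ...) in nv_start_xmit()), the
 * NVREG_TXRXCTL_RXCHECK bit stays asserted on v2 nics, which is what keeps
 * the rx checksum offload active. */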

#define PHY_OUI_MARVELL	0x5043
#define PHY_OUI_CICADA	0x03f1
#define PHYID1_OUI_MASK	0x03ff
#define PHYID1_OUI_SHFT	6
#define PHYID2_OUI_MASK	0xfc00
#define PHYID2_OUI_SHFT	10
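/* nv_probe() below combines the two MII ID registers into np->phy_oui:
 * ((PHYSID1 & 0x03ff) << 6) | ((PHYSID2 & 0xfc00) >> 10), which yields
 * values comparable against PHY_OUI_MARVELL/PHY_OUI_CICADA. */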
#define PHY_INIT1	0x0f000
#define PHY_INIT2	0x0e00
#define PHY_INIT3	0x01000
#define PHY_INIT4	0x0200
#define PHY_INIT5	0x0004
#define PHY_INIT6	0x02000
#define PHY_GIGABIT	0x0100

#define PHY_TIMEOUT	0x1
#define PHY_ERROR	0x2

#define PHY_100		0x1
#define PHY_1000	0x2
#define PHY_HALF	0x100

/* FIXME: MII defines that should be added to <linux/mii.h> */
#define MII_1000BT_CR	0x09
#define MII_1000BT_SR	0x0a
#define ADVERTISE_1000FULL	0x0200
#define ADVERTISE_1000HALF	0x0100
#define LPA_1000FULL	0x0800
#define LPA_1000HALF	0x0400

/*
 * SMP locking:
 * All hardware access under dev->priv->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *   by the arch code for interrupts.
 * - tx setup is lockless: it relies on dev->xmit_lock. Actual submission
 *   needs dev->priv->lock :-(
 * - set_multicast_list: preparation lockless, relies on dev->xmit_lock.
 */
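/* In practice that means the interrupt handler takes plain
 * spin_lock(&np->lock) (it already runs with the local irq line masked),
 * while process-context paths such as nv_set_multicast() and the ethtool
 * handlers use spin_lock_irq(&np->lock). */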

/* in dev: base, irq */

struct fe_priv {
	spinlock_t lock;

	/* General data:
	 * Locking: spin_lock(&np->lock); */
	struct net_device_stats stats;
	int in_shutdown;
	u32 linkspeed;
	int duplex;
	int autoneg;
	int fixed_mode;
	int phyaddr;
	int wolenabled;
	unsigned int phy_oui;
	u16 gigabit;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;
	u32 orig_mac[2];
	u32 irqmask;
	u32 desc_ver;

	u8 __iomem *base;

	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	struct ring_desc *rx_ring;
	unsigned int cur_rx, refill_rx;
	struct sk_buff *rx_skbuff[RX_RING];
	dma_addr_t rx_dma[RX_RING];
	unsigned int rx_buf_sz;
	struct timer_list oom_kick;
	struct timer_list nic_poll;

	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	int need_linktimer;
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
	struct ring_desc *tx_ring;
	unsigned int next_tx, nic_tx;
	struct sk_buff *tx_skbuff[TX_RING];
	dma_addr_t tx_dma[TX_RING];
	u32 tx_flags;
};

/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 5;

static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return get_nvpriv(dev)->base;
}

static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}
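/* MMIO writes over PCI are posted and may be delayed in bridges; reading
 * any register back from the same device flushes them, hence the dummy
 * readl() above. */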

static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->FlagLen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
				int delay, int delaymax, const char *msg)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0) {
			if (msg)
				printk(msg);
			return 1;	/* timed out */
		}
	} while ((readl(base + offset) & mask) != target);
	return 0;
}

#define MII_READ	(-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
				dev->name, value, miireg, addr);
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
		dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
				dev->name, miireg, addr, retval);
	}

	return retval;
}
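/* Usage sketch: mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ) returns the
 * register contents (or -1 on failure); passing a real value instead of
 * MII_READ performs a write and returns 0 on success, which is why the
 * callers below treat a nonzero return from a write as an error. */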

static int phy_reset(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	miicontrol |= BMCR_RESET;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
		return -1;
	}

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		msleep(10);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}

static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000, reg;

	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|0x800|0x400);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if (mii_rw(dev, np->phyaddr, MII_1000BT_CR, mii_control_1000)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else {
		np->gigabit = 0;
	}

	/* reset the phy */
	if (phy_reset(dev)) {
		printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* phy vendor specific configuration */
	if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_INIT1 | PHY_INIT2);
		phy_reserved |= (PHY_INIT3 | PHY_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_CICADA) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
		phy_reserved |= PHY_INIT6;
		if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* restart auto negotiation */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
		printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	return 0;
}

static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
	/* Already running? Stop it. */
	if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
		writel(0, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	writel(NVREG_RCVCTL_START, base + NvRegReceiverControl);
	dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
				dev->name, np->duplex, np->linkspeed);
	pci_push(base);
}

static void nv_stop_rx(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
	writel(0, base + NvRegReceiverControl);
	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
			NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
			KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");

	udelay(NV_RXSTOP_DELAY2);
	writel(0, base + NvRegLinkSpeed);
}

static void nv_start_tx(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
	writel(NVREG_XMITCTL_START, base + NvRegTransmitterControl);
	pci_push(base);
}

static void nv_stop_tx(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
	writel(0, base + NvRegTransmitterControl);
	reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
			NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
			KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");

	udelay(NV_TXSTOP_DELAY2);
	writel(0, base + NvRegUnknownTransmitterReg);
}

static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->desc_ver, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->desc_ver, base + NvRegTxRxControl);
	pci_push(base);
}

/*
 * nv_get_stats: dev->get_stats function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	/* It seems that the nic always generates interrupts and doesn't
	 * accumulate errors internally. Thus the current values in np->stats
	 * are already up to date.
	 */
	return &np->stats;
}

/*
 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is without Available descriptors
 */
static int nv_alloc_rx(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	unsigned int refill_rx = np->refill_rx;
	int nr;

	while (np->cur_rx != refill_rx) {
		struct sk_buff *skb;

		nr = refill_rx % RX_RING;
		if (np->rx_skbuff[nr] == NULL) {
			skb = dev_alloc_skb(RX_ALLOC_BUFSIZE);
			if (!skb)
				break;
			skb->dev = dev;
			np->rx_skbuff[nr] = skb;
		} else {
			skb = np->rx_skbuff[nr];
		}
		np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data, skb->len,
						PCI_DMA_FROMDEVICE);
		np->rx_ring[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
		wmb();
		np->rx_ring[nr].FlagLen = cpu_to_le32(RX_NIC_BUFSIZE | NV_RX_AVAIL);
		dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
				dev->name, refill_rx);
		refill_rx++;
	}
	np->refill_rx = refill_rx;
	if (np->cur_rx - refill_rx == RX_RING)
		return 1;
	return 0;
}

static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = get_nvpriv(dev);

	disable_irq(dev->irq);
	if (nv_alloc_rx(dev)) {
		spin_lock(&np->lock);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock(&np->lock);
	}
	enable_irq(dev->irq);
}

static int nv_init_ring(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	int i;

	np->next_tx = np->nic_tx = 0;
	for (i = 0; i < TX_RING; i++)
		np->tx_ring[i].FlagLen = 0;

	np->cur_rx = RX_RING;
	np->refill_rx = 0;
	for (i = 0; i < RX_RING; i++)
		np->rx_ring[i].FlagLen = 0;
	return nv_alloc_rx(dev);
}
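/* Note the index convention: cur_rx and refill_rx (like next_tx/nic_tx)
 * are free-running counters that are only reduced modulo RX_RING when a
 * ring slot is actually accessed, so cur_rx - refill_rx is always the
 * number of descriptors still needing a refill; starting from
 * cur_rx == RX_RING with refill_rx == 0 therefore makes nv_alloc_rx()
 * fill the entire ring. */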

static void nv_drain_tx(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	int i;

	for (i = 0; i < TX_RING; i++) {
		np->tx_ring[i].FlagLen = 0;
		if (np->tx_skbuff[i]) {
			pci_unmap_single(np->pci_dev, np->tx_dma[i],
					np->tx_skbuff[i]->len,
					PCI_DMA_TODEVICE);
			dev_kfree_skb(np->tx_skbuff[i]);
			np->tx_skbuff[i] = NULL;
			np->stats.tx_dropped++;
		}
	}
}

static void nv_drain_rx(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	int i;

	for (i = 0; i < RX_RING; i++) {
		np->rx_ring[i].FlagLen = 0;
		wmb();
		if (np->rx_skbuff[i]) {
			pci_unmap_single(np->pci_dev, np->rx_dma[i],
					np->rx_skbuff[i]->len,
					PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skbuff[i]);
			np->rx_skbuff[i] = NULL;
		}
	}
}

static void drain_ring(struct net_device *dev)
{
	nv_drain_tx(dev);
	nv_drain_rx(dev);
}

/*
 * nv_start_xmit: dev->hard_start_xmit function
 * Called with dev->xmit_lock held.
 */
static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	int nr = np->next_tx % TX_RING;

	np->tx_skbuff[nr] = skb;
	np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data, skb->len,
					PCI_DMA_TODEVICE);

	np->tx_ring[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);

	spin_lock_irq(&np->lock);
	wmb();
	np->tx_ring[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags );
	dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d queued for transmission.\n",
				dev->name, np->next_tx);
	{
		int j;
		for (j=0; j<64; j++) {
			if ((j%16) == 0)
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char*)skb->data)[j]);
		}
		dprintk("\n");
	}

	np->next_tx++;

	dev->trans_start = jiffies;
	if (np->next_tx - np->nic_tx >= TX_LIMIT_STOP)
		netif_stop_queue(dev);
	spin_unlock_irq(&np->lock);
	writel(NVREG_TXRXCTL_KICK|np->desc_ver, get_hwbase(dev) + NvRegTxRxControl);
	pci_push(get_hwbase(dev));
	return 0;
}

/*
 * nv_tx_done: check for completed packets, release the skbs.
 *
 * Caller must own np->lock.
 */
static void nv_tx_done(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u32 Flags;
	int i;

	while (np->nic_tx != np->next_tx) {
		i = np->nic_tx % TX_RING;

		Flags = le32_to_cpu(np->tx_ring[i].FlagLen);

		dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n",
					dev->name, np->nic_tx, Flags);
		if (Flags & NV_TX_VALID)
			break;
		if (np->desc_ver == DESC_VER_1) {
			if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
							NV_TX_UNDERFLOW|NV_TX_ERROR)) {
				if (Flags & NV_TX_UNDERFLOW)
					np->stats.tx_fifo_errors++;
				if (Flags & NV_TX_CARRIERLOST)
					np->stats.tx_carrier_errors++;
				np->stats.tx_errors++;
			} else {
				np->stats.tx_packets++;
				np->stats.tx_bytes += np->tx_skbuff[i]->len;
			}
		} else {
			if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
							NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
				if (Flags & NV_TX2_UNDERFLOW)
					np->stats.tx_fifo_errors++;
				if (Flags & NV_TX2_CARRIERLOST)
					np->stats.tx_carrier_errors++;
				np->stats.tx_errors++;
			} else {
				np->stats.tx_packets++;
				np->stats.tx_bytes += np->tx_skbuff[i]->len;
			}
		}
		pci_unmap_single(np->pci_dev, np->tx_dma[i],
					np->tx_skbuff[i]->len,
					PCI_DMA_TODEVICE);
		dev_kfree_skb_irq(np->tx_skbuff[i]);
		np->tx_skbuff[i] = NULL;
		np->nic_tx++;
	}
	if (np->next_tx - np->nic_tx < TX_LIMIT_START)
		netif_wake_queue(dev);
}

/*
 * nv_tx_timeout: dev->tx_timeout function
 * Called with dev->xmit_lock held.
 */
static void nv_tx_timeout(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: Got tx_timeout. irq: %08x\n", dev->name,
			readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK);

	spin_lock_irq(&np->lock);

	/* 1) stop tx engine */
	nv_stop_tx(dev);

	/* 2) check that the packets were not sent already: */
	nv_tx_done(dev);

	/* 3) if there are dead entries: clear everything */
	if (np->next_tx != np->nic_tx) {
		printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
		nv_drain_tx(dev);
		np->next_tx = np->nic_tx = 0;
		writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
		netif_wake_queue(dev);
	}

	/* 4) restart tx engine */
	nv_start_tx(dev);
	spin_unlock_irq(&np->lock);
}

/*
 * Called when the nic notices a mismatch between the actual data len on the
 * wire and the len indicated in the 802 header
 */
static int nv_getlen(struct net_device *dev, void *packet, int datalen)
{
	int hdrlen;	/* length of the 802 header */
	int protolen;	/* length as stored in the proto field */

	/* 1) calculate len according to header */
	if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == __constant_htons(ETH_P_8021Q)) {
		protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
		hdrlen = VLAN_HLEN;
	} else {
		protolen = ntohs( ((struct ethhdr *)packet)->h_proto);
		hdrlen = ETH_HLEN;
	}
	dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
				dev->name, datalen, protolen, hdrlen);
	if (protolen > ETH_DATA_LEN)
		return datalen; /* Value in proto field not a len, no checks possible */

	protolen += hdrlen;
	/* consistency checks: */
	if (datalen > ETH_ZLEN) {
		if (datalen >= protolen) {
			/* more data on wire than in 802 header, trim off
			 * additional data.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
					dev->name, protolen);
			return protolen;
		} else {
			/* less data on wire than mentioned in header.
			 * Discard the packet.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
					dev->name);
			return -1;
		}
	} else {
		/* short packet. Accept only if 802 values are also short */
		if (protolen > ETH_ZLEN) {
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
					dev->name);
			return -1;
		}
		dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
				dev->name, datalen);
		return datalen;
	}
}

static void nv_rx_process(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u32 Flags;

	for (;;) {
		struct sk_buff *skb;
		int len;
		int i;
		if (np->cur_rx - np->refill_rx >= RX_RING)
			break;	/* we scanned the whole ring - do not continue */

		i = np->cur_rx % RX_RING;
		Flags = le32_to_cpu(np->rx_ring[i].FlagLen);
		len = nv_descr_getlength(&np->rx_ring[i], np->desc_ver);

		dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n",
					dev->name, np->cur_rx, Flags);

		if (Flags & NV_RX_AVAIL)
			break;	/* still owned by hardware, */

		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->rx_dma[i],
				np->rx_skbuff[i]->len,
				PCI_DMA_FROMDEVICE);

		{
			int j;
			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",Flags);
			for (j=0; j<64; j++) {
				if ((j%16) == 0)
					dprintk("\n%03x:", j);
				dprintk(" %02x", ((unsigned char*)np->rx_skbuff[i]->data)[j]);
			}
			dprintk("\n");
		}
		/* look at what we actually got: */
		if (np->desc_ver == DESC_VER_1) {
			if (!(Flags & NV_RX_DESCRIPTORVALID))
				goto next_pkt;

			if (Flags & NV_RX_MISSEDFRAME) {
				np->stats.rx_missed_errors++;
				np->stats.rx_errors++;
				goto next_pkt;
			}
			if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
				np->stats.rx_errors++;
				goto next_pkt;
			}
			if (Flags & NV_RX_CRCERR) {
				np->stats.rx_crc_errors++;
				np->stats.rx_errors++;
				goto next_pkt;
			}
			if (Flags & NV_RX_OVERFLOW) {
				np->stats.rx_over_errors++;
				np->stats.rx_errors++;
				goto next_pkt;
			}
			if (Flags & NV_RX_ERROR4) {
				len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
				if (len < 0) {
					np->stats.rx_errors++;
					goto next_pkt;
				}
			}
			/* framing errors are soft errors. */
			if (Flags & NV_RX_FRAMINGERR) {
				if (Flags & NV_RX_SUBSTRACT1) {
					len--;
				}
			}
		} else {
			if (!(Flags & NV_RX2_DESCRIPTORVALID))
				goto next_pkt;

			if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
				np->stats.rx_errors++;
				goto next_pkt;
			}
			if (Flags & NV_RX2_CRCERR) {
				np->stats.rx_crc_errors++;
				np->stats.rx_errors++;
				goto next_pkt;
			}
			if (Flags & NV_RX2_OVERFLOW) {
				np->stats.rx_over_errors++;
				np->stats.rx_errors++;
				goto next_pkt;
			}
			if (Flags & NV_RX2_ERROR4) {
				len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
				if (len < 0) {
					np->stats.rx_errors++;
					goto next_pkt;
				}
			}
			/* framing errors are soft errors */
			if (Flags & NV_RX2_FRAMINGERR) {
				if (Flags & NV_RX2_SUBSTRACT1) {
					len--;
				}
			}
			Flags &= NV_RX2_CHECKSUMMASK;
			if (Flags == NV_RX2_CHECKSUMOK1 ||
					Flags == NV_RX2_CHECKSUMOK2 ||
					Flags == NV_RX2_CHECKSUMOK3) {
				dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name);
				np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				dprintk(KERN_DEBUG "%s: hwchecksum miss!.\n", dev->name);
			}
		}
		/* got a valid packet - forward it to the network core */
		skb = np->rx_skbuff[i];
		np->rx_skbuff[i] = NULL;

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n",
					dev->name, np->cur_rx, len, skb->protocol);
		netif_rx(skb);
		dev->last_rx = jiffies;
		np->stats.rx_packets++;
		np->stats.rx_bytes += len;
next_pkt:
		np->cur_rx++;
	}
}

/*
 * nv_change_mtu: dev->change_mtu function
 * Called with dev_base_lock held for read.
 */
static int nv_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu > ETH_DATA_LEN)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

/*
 * nv_set_multicast: dev->set_multicast function
 * Called with dev->xmit_lock held.
 */
static void nv_set_multicast(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 addr[2];
	u32 mask[2];
	u32 pff;

	memset(addr, 0, sizeof(addr));
	memset(mask, 0, sizeof(mask));

	if (dev->flags & IFF_PROMISC) {
		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
		pff = NVREG_PFF_PROMISC;
	} else {
		pff = NVREG_PFF_MYADDR;

		if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
			u32 alwaysOff[2];
			u32 alwaysOn[2];

			alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
			if (dev->flags & IFF_ALLMULTI) {
				alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
			} else {
				struct dev_mc_list *walk;

				walk = dev->mc_list;
				while (walk != NULL) {
					u32 a, b;
					a = le32_to_cpu(*(u32 *) walk->dmi_addr);
					b = le16_to_cpu(*(u16 *) (&walk->dmi_addr[4]));
					alwaysOn[0] &= a;
					alwaysOff[0] &= ~a;
					alwaysOn[1] &= b;
					alwaysOff[1] &= ~b;
					walk = walk->next;
				}
			}
			addr[0] = alwaysOn[0];
			addr[1] = alwaysOn[1];
			mask[0] = alwaysOn[0] | alwaysOff[0];
			mask[1] = alwaysOn[1] | alwaysOff[1];
		}
	}
	addr[0] |= NVREG_MCASTADDRA_FORCE;
	pff |= NVREG_PFF_ALWAYS;
	spin_lock_irq(&np->lock);
	nv_stop_rx(dev);
	writel(addr[0], base + NvRegMulticastAddrA);
	writel(addr[1], base + NvRegMulticastAddrB);
	writel(mask[0], base + NvRegMulticastMaskA);
	writel(mask[1], base + NvRegMulticastMaskB);
	writel(pff, base + NvRegPacketFilterFlags);
	dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
		dev->name);
	nv_start_rx(dev);
	spin_unlock_irq(&np->lock);
}

static int nv_update_linkspeed(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int adv, lpa;
	int newls = np->linkspeed;
	int newdup = np->duplex;
	int mii_status;
	int retval = 0;
	u32 control_1000, status_1000, phyreg;

	/* BMSR_LSTATUS is latched, read it twice:
	 * we want the current value.
	 */
	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

	if (!(mii_status & BMSR_LSTATUS)) {
		dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
				dev->name);
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
		retval = 0;
		goto set_speed;
	}

	if (np->autoneg == 0) {
		dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
				dev->name, np->fixed_mode);
		if (np->fixed_mode & LPA_100FULL) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
			newdup = 1;
		} else if (np->fixed_mode & LPA_100HALF) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
			newdup = 0;
		} else if (np->fixed_mode & LPA_10FULL) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
			newdup = 1;
		} else {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
			newdup = 0;
		}
		retval = 1;
		goto set_speed;
	}
	/* check auto negotiation is complete */
	if (!(mii_status & BMSR_ANEGCOMPLETE)) {
		/* still in autonegotiation - configure nic for 10 MBit HD and wait. */
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
		retval = 0;
		dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
		goto set_speed;
	}

	retval = 1;
	if (np->gigabit == PHY_GIGABIT) {
		control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
		status_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_SR, MII_READ);

		if ((control_1000 & ADVERTISE_1000FULL) &&
			(status_1000 & LPA_1000FULL)) {
			dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
				dev->name);
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
			newdup = 1;
			goto set_speed;
		}
	}

	adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
	dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
				dev->name, adv, lpa);

	/* FIXME: handle parallel detection properly */
	lpa = lpa & adv;
	if (lpa & LPA_100FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 1;
	} else if (lpa & LPA_100HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 0;
	} else if (lpa & LPA_10FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 1;
	} else if (lpa & LPA_10HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	} else {
		dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, lpa);
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	}

set_speed:
	if (np->duplex == newdup && np->linkspeed == newls)
		return retval;

	dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
			dev->name, np->linkspeed, np->duplex, newls, newdup);

	np->duplex = newdup;
	np->linkspeed = newls;

	if (np->gigabit == PHY_GIGABIT) {
		phyreg = readl(base + NvRegRandomSeed);
		phyreg &= ~(0x3FF00);
		if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
			phyreg |= NVREG_RNDSEED_FORCE3;
		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
			phyreg |= NVREG_RNDSEED_FORCE2;
		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
			phyreg |= NVREG_RNDSEED_FORCE;
		writel(phyreg, base + NvRegRandomSeed);
	}

	phyreg = readl(base + NvRegPhyInterface);
	phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
	if (np->duplex == 0)
		phyreg |= PHY_HALF;
	if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
		phyreg |= PHY_100;
	else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
		phyreg |= PHY_1000;
	writel(phyreg, base + NvRegPhyInterface);

	writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
		base + NvRegMisc1);
	pci_push(base);
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);

	return retval;
}

static void nv_linkchange(struct net_device *dev)
{
	if (nv_update_linkspeed(dev)) {
		if (netif_carrier_ok(dev)) {
			nv_stop_rx(dev);
		} else {
			netif_carrier_on(dev);
			printk(KERN_INFO "%s: link up.\n", dev->name);
		}
		nv_start_rx(dev);
	} else {
		if (netif_carrier_ok(dev)) {
			netif_carrier_off(dev);
			printk(KERN_INFO "%s: link down.\n", dev->name);
			nv_stop_rx(dev);
		}
	}
}

static void nv_link_irq(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 miistat;

	miistat = readl(base + NvRegMIIStatus);
	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
	dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);

	if (miistat & (NVREG_MIISTAT_LINKCHANGE))
		nv_linkchange(dev);
	dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
}

static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;

	dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);

	for (i=0; ; i++) {
		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
		writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
		pci_push(base);
		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		if (events & (NVREG_IRQ_TX1|NVREG_IRQ_TX2|NVREG_IRQ_TX_ERR)) {
			spin_lock(&np->lock);
			nv_tx_done(dev);
			spin_unlock(&np->lock);
		}

		if (events & (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF)) {
			nv_rx_process(dev);
			if (nv_alloc_rx(dev)) {
				spin_lock(&np->lock);
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
				spin_unlock(&np->lock);
			}
		}

		if (events & NVREG_IRQ_LINK) {
			spin_lock(&np->lock);
			nv_link_irq(dev);
			spin_unlock(&np->lock);
		}
		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
			spin_lock(&np->lock);
			nv_linkchange(dev);
			spin_unlock(&np->lock);
			np->link_timeout = jiffies + LINK_TIMEOUT;
		}
		if (events & (NVREG_IRQ_TX_ERR)) {
			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
						dev->name, events);
		}
		if (events & (NVREG_IRQ_UNKNOWN)) {
			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
						dev->name, events);
		}
		if (i > max_interrupt_work) {
			spin_lock(&np->lock);
			/* disable interrupts on the nic */
			writel(0, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown)
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
			spin_unlock(&np->lock);
			break;
		}
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);

	return IRQ_RETVAL(i);
}
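/* IRQ_RETVAL(i) evaluates to IRQ_HANDLED when at least one loop iteration
 * saw our events (i != 0) and to IRQ_NONE otherwise, which matters because
 * the line is requested with SA_SHIRQ in nv_open() and may be shared. */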

static void nv_do_nic_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	disable_irq(dev->irq);
	/* FIXME: Do we need synchronize_irq(dev->irq) here? */
	/*
	 * reenable interrupts on the nic, we have to do this before calling
	 * nv_nic_irq because that may decide to do otherwise
	 */
	writel(np->irqmask, base + NvRegIrqMask);
	pci_push(base);
	nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
	enable_irq(dev->irq);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void nv_poll_controller(struct net_device *dev)
{
	nv_do_nic_poll((unsigned long) dev);
}
#endif

static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct fe_priv *np = get_nvpriv(dev);
	strcpy(info->driver, "forcedeth");
	strcpy(info->version, FORCEDETH_VERSION);
	strcpy(info->bus_info, pci_name(np->pci_dev));
}

static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
	struct fe_priv *np = get_nvpriv(dev);
	wolinfo->supported = WAKE_MAGIC;

	spin_lock_irq(&np->lock);
	if (np->wolenabled)
		wolinfo->wolopts = WAKE_MAGIC;
	spin_unlock_irq(&np->lock);
}

static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	spin_lock_irq(&np->lock);
	if (wolinfo->wolopts == 0) {
		writel(0, base + NvRegWakeUpFlags);
		np->wolenabled = 0;
	}
	if (wolinfo->wolopts & WAKE_MAGIC) {
		writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
		np->wolenabled = 1;
	}
	spin_unlock_irq(&np->lock);
	return 0;
}

static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct fe_priv *np = netdev_priv(dev);
	int adv;

	spin_lock_irq(&np->lock);
	ecmd->port = PORT_MII;
	if (!netif_running(dev)) {
		/* We do not track link speed / duplex setting if the
		 * interface is disabled. Force a link check */
		nv_update_linkspeed(dev);
	}
	switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
	case NVREG_LINKSPEED_10:
		ecmd->speed = SPEED_10;
		break;
	case NVREG_LINKSPEED_100:
		ecmd->speed = SPEED_100;
		break;
	case NVREG_LINKSPEED_1000:
		ecmd->speed = SPEED_1000;
		break;
	}
	if (np->duplex == 0)
		ecmd->duplex = DUPLEX_HALF;
	else
		ecmd->duplex = DUPLEX_FULL;
	ecmd->autoneg = np->autoneg;

	ecmd->advertising = ADVERTISED_MII;
	if (np->autoneg) {
		ecmd->advertising |= ADVERTISED_Autoneg;
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	} else {
		adv = np->fixed_mode;
	}
	if (adv & ADVERTISE_10HALF)
		ecmd->advertising |= ADVERTISED_10baseT_Half;
	if (adv & ADVERTISE_10FULL)
		ecmd->advertising |= ADVERTISED_10baseT_Full;
	if (adv & ADVERTISE_100HALF)
		ecmd->advertising |= ADVERTISED_100baseT_Half;
	if (adv & ADVERTISE_100FULL)
		ecmd->advertising |= ADVERTISED_100baseT_Full;
	if (np->autoneg && np->gigabit == PHY_GIGABIT) {
		adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
		if (adv & ADVERTISE_1000FULL)
			ecmd->advertising |= ADVERTISED_1000baseT_Full;
	}

	ecmd->supported = (SUPPORTED_Autoneg |
		SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		SUPPORTED_MII);
	if (np->gigabit == PHY_GIGABIT)
		ecmd->supported |= SUPPORTED_1000baseT_Full;

	ecmd->phy_address = np->phyaddr;
	ecmd->transceiver = XCVR_EXTERNAL;

	/* ignore maxtxpkt, maxrxpkt for now */
	spin_unlock_irq(&np->lock);
	return 0;
}

static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct fe_priv *np = netdev_priv(dev);

	if (ecmd->port != PORT_MII)
		return -EINVAL;
	if (ecmd->transceiver != XCVR_EXTERNAL)
		return -EINVAL;
	if (ecmd->phy_address != np->phyaddr) {
		/* TODO: support switching between multiple phys. Should be
		 * trivial, but not enabled due to lack of test hardware. */
		return -EINVAL;
	}
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		u32 mask;

		mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
		if (np->gigabit == PHY_GIGABIT)
			mask |= ADVERTISED_1000baseT_Full;

		if ((ecmd->advertising & mask) == 0)
			return -EINVAL;
	} else if (ecmd->autoneg == AUTONEG_DISABLE) {
		/* Note: autonegotiation disable, speed 1000 intentionally
		 * forbidden - no one should need that. */

		if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
			return -EINVAL;
		if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	spin_lock_irq(&np->lock);
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		int adv, bmcr;

		np->autoneg = 1;

		/* advertise only what has been requested */
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
		if (ecmd->advertising & ADVERTISED_10baseT_Half)
			adv |= ADVERTISE_10HALF;
		if (ecmd->advertising & ADVERTISED_10baseT_Full)
			adv |= ADVERTISE_10FULL;
		if (ecmd->advertising & ADVERTISED_100baseT_Half)
			adv |= ADVERTISE_100HALF;
		if (ecmd->advertising & ADVERTISED_100baseT_Full)
			adv |= ADVERTISE_100FULL;
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);

		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
			adv &= ~ADVERTISE_1000FULL;
			if (ecmd->advertising & ADVERTISED_1000baseT_Full)
				adv |= ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv);
		}

		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
	} else {
		int adv, bmcr;

		np->autoneg = 0;

		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
			adv |= ADVERTISE_10HALF;
		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
			adv |= ADVERTISE_10FULL;
		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
			adv |= ADVERTISE_100HALF;
		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
			adv |= ADVERTISE_100FULL;
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
		np->fixed_mode = adv;

		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
			adv &= ~ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv);
		}

		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_FULLDPLX);
		if (adv & (ADVERTISE_10FULL|ADVERTISE_100FULL))
			bmcr |= BMCR_FULLDPLX;
		if (adv & (ADVERTISE_100HALF|ADVERTISE_100FULL))
			bmcr |= BMCR_SPEED100;
		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
	}

	if (netif_running(dev)) {
		/* Wait a bit and then reconfigure the nic. */
		udelay(10);
		nv_linkchange(dev);
	}

	spin_unlock_irq(&np->lock);

	return 0;
}

static struct ethtool_ops ops = {
	.get_drvinfo = nv_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_wol = nv_get_wol,
	.set_wol = nv_set_wol,
	.get_settings = nv_get_settings,
	.set_settings = nv_set_settings,
};

static int nv_open(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret, oom, i;

	dprintk(KERN_DEBUG "nv_open: begin\n");

	/* 1) erase previous misconfiguration */
	/* 4.1-1: stop adapter: ignored, 4.3 seems to be overkill */
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(0, base + NvRegMulticastMaskA);
	writel(0, base + NvRegMulticastMaskB);
	writel(0, base + NvRegPacketFilterFlags);

	writel(0, base + NvRegTransmitterControl);
	writel(0, base + NvRegReceiverControl);

	writel(0, base + NvRegAdapterControl);

	/* 2) initialize descriptor rings */
	oom = nv_init_ring(dev);

	writel(0, base + NvRegLinkSpeed);
	writel(0, base + NvRegUnknownTransmitterReg);
	nv_txrx_reset(dev);
	writel(0, base + NvRegUnknownSetupReg6);

	np->in_shutdown = 0;

	/* 3) set mac address */
	{
		u32 mac[2];

		mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
				(dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
		mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);

		writel(mac[0], base + NvRegMacAddrA);
		writel(mac[1], base + NvRegMacAddrB);
	}

	/* 4) give hw rings */
	writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr);
	writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
	writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
		base + NvRegRingSizes);

	/* 5) continue setup */
	writel(np->linkspeed, base + NvRegLinkSpeed);
	writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3);
	writel(np->desc_ver, base + NvRegTxRxControl);
	pci_push(base);
	writel(NVREG_TXRXCTL_BIT1|np->desc_ver, base + NvRegTxRxControl);
	reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
			NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
			KERN_INFO "open: SetupReg5, Bit 31 remained off\n");

	writel(0, base + NvRegUnknownSetupReg4);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);

	/* 6) continue setup */
	writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
	writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
	writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
	writel(NVREG_OFFLOAD_NORMAL, base + NvRegOffloadConfig);

	writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
	get_random_bytes(&i, sizeof(i));
	writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed);
	writel(NVREG_UNKSETUP1_VAL, base + NvRegUnknownSetupReg1);
	writel(NVREG_UNKSETUP2_VAL, base + NvRegUnknownSetupReg2);
	writel(NVREG_POLL_DEFAULT, base + NvRegPollingInterval);
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
	writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
		base + NvRegAdapterControl);
	writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
	writel(NVREG_UNKSETUP4_VAL, base + NvRegUnknownSetupReg4);
	writel(NVREG_WAKEUPFLAGS_VAL, base + NvRegWakeUpFlags);

	i = readl(base + NvRegPowerState);
	if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0)
		writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);

	pci_push(base);
	udelay(10);
	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);

	writel(0, base + NvRegIrqMask);
	pci_push(base);
	writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	pci_push(base);

	ret = request_irq(dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev);
	if (ret)
		goto out_drain;

	/* ask for interrupts */
	writel(np->irqmask, base + NvRegIrqMask);

	spin_lock_irq(&np->lock);
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(0, base + NvRegMulticastMaskA);
	writel(0, base + NvRegMulticastMaskB);
	writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
	/* One manual link speed update: Interrupts are enabled, future link
	 * speed changes cause interrupts and are handled by nv_link_irq().
	 */
	{
		u32 miistat;
		miistat = readl(base + NvRegMIIStatus);
		writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
		dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
	}
	ret = nv_update_linkspeed(dev);
	nv_start_rx(dev);
	nv_start_tx(dev);
	netif_start_queue(dev);
	if (ret) {
		netif_carrier_on(dev);
	} else {
		printk("%s: no link during initialization.\n", dev->name);
		netif_carrier_off(dev);
	}
	if (oom)
		mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
	spin_unlock_irq(&np->lock);

	return 0;
out_drain:
	drain_ring(dev);
	return ret;
}

static int nv_close(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base;

	spin_lock_irq(&np->lock);
	np->in_shutdown = 1;
	spin_unlock_irq(&np->lock);
	synchronize_irq(dev->irq);

	del_timer_sync(&np->oom_kick);
	del_timer_sync(&np->nic_poll);

	netif_stop_queue(dev);
	spin_lock_irq(&np->lock);
	nv_stop_tx(dev);
	nv_stop_rx(dev);
	nv_txrx_reset(dev);

	/* disable interrupts on the nic or we will lock up */
	base = get_hwbase(dev);
	writel(0, base + NvRegIrqMask);
	pci_push(base);
	dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);

	spin_unlock_irq(&np->lock);

	free_irq(dev->irq, dev);

	drain_ring(dev);

	if (np->wolenabled)
		nv_start_rx(dev);

	/* FIXME: power down nic */

	return 0;
}
1948 static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
1950 struct net_device *dev;
1956 dev = alloc_etherdev(sizeof(struct fe_priv));
1961 np = get_nvpriv(dev);
1962 np->pci_dev = pci_dev;
1963 spin_lock_init(&np->lock);
1964 SET_MODULE_OWNER(dev);
1965 SET_NETDEV_DEV(dev, &pci_dev->dev);
1967 init_timer(&np->oom_kick);
1968 np->oom_kick.data = (unsigned long) dev;
1969 np->oom_kick.function = &nv_do_rx_refill; /* timer handler */
1970 init_timer(&np->nic_poll);
1971 np->nic_poll.data = (unsigned long) dev;
1972 np->nic_poll.function = &nv_do_nic_poll; /* timer handler */
1974 err = pci_enable_device(pci_dev);
1976 printk(KERN_INFO "forcedeth: pci_enable_dev failed (%d) for device %s\n",
1977 err, pci_name(pci_dev));
1981 pci_set_master(pci_dev);
1983 err = pci_request_regions(pci_dev, DRV_NAME);
1989 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1990 dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
1991 pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
1992 pci_resource_len(pci_dev, i),
1993 pci_resource_flags(pci_dev, i));
1994 if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
1995 pci_resource_len(pci_dev, i) >= NV_PCI_REGSZ) {
1996 addr = pci_resource_start(pci_dev, i);
2000 if (i == DEVICE_COUNT_RESOURCE) {
2001 printk(KERN_INFO "forcedeth: Couldn't find register window for device %s.\n",
2006 /* handle different descriptor versions */
2007 if (pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_1 ||
2008 pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_2 ||
2009 pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_3 ||
2010 pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
2011 pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_13)
2012 np->desc_ver = DESC_VER_1;
2014 np->desc_ver = DESC_VER_2;
2017 np->base = ioremap(addr, NV_PCI_REGSZ);
2020 dev->base_addr = (unsigned long)np->base;
2021 dev->irq = pci_dev->irq;
2022 np->rx_ring = pci_alloc_consistent(pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
2026 np->tx_ring = &np->rx_ring[RX_RING];
2028 dev->open = nv_open;
2029 dev->stop = nv_close;
2030 dev->hard_start_xmit = nv_start_xmit;
2031 dev->get_stats = nv_get_stats;
2032 dev->change_mtu = nv_change_mtu;
2033 dev->set_multicast_list = nv_set_multicast;
2034 #ifdef CONFIG_NET_POLL_CONTROLLER
2035 dev->poll_controller = nv_poll_controller;
2037 SET_ETHTOOL_OPS(dev, &ops);
2038 dev->tx_timeout = nv_tx_timeout;
2039 dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
2041 pci_set_drvdata(pci_dev, dev);
2043 /* read the mac address */
2044 base = get_hwbase(dev);
2045 np->orig_mac[0] = readl(base + NvRegMacAddrA);
2046 np->orig_mac[1] = readl(base + NvRegMacAddrB);
	dev->dev_addr[0] = (np->orig_mac[1] >>  8) & 0xff;
	dev->dev_addr[1] = (np->orig_mac[1] >>  0) & 0xff;
	dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
	dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
	dev->dev_addr[4] = (np->orig_mac[0] >>  8) & 0xff;
	dev->dev_addr[5] = (np->orig_mac[0] >>  0) & 0xff;
	if (!is_valid_ether_addr(dev->dev_addr)) {
		/*
		 * Bad mac address. At least one bios sets the mac address
		 * to 01:23:45:67:89:ab
		 */
		printk(KERN_ERR "%s: Invalid Mac address detected: %02x:%02x:%02x:%02x:%02x:%02x\n",
			pci_name(pci_dev),
			dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
			dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
		printk(KERN_ERR "Please complain to your hardware vendor. Switching to a random MAC.\n");
		dev->dev_addr[0] = 0x00;
		dev->dev_addr[1] = 0x00;
		dev->dev_addr[2] = 0x6c;
		get_random_bytes(&dev->dev_addr[3], 3);
	}

	dprintk(KERN_DEBUG "%s: MAC Address %02x:%02x:%02x:%02x:%02x:%02x\n", pci_name(pci_dev),
			dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
			dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);

	/* disable WOL */
	writel(0, base + NvRegWakeUpFlags);
	np->wolenabled = 0;
	if (np->desc_ver == DESC_VER_1) {
		np->tx_flags = NV_TX_LASTPACKET|NV_TX_VALID;
		if (id->driver_data & DEV_NEED_LASTPACKET1)
			np->tx_flags |= NV_TX_LASTPACKET1;
	} else {
		np->tx_flags = NV_TX2_LASTPACKET|NV_TX2_VALID;
		if (id->driver_data & DEV_NEED_LASTPACKET1)
			np->tx_flags |= NV_TX2_LASTPACKET1;
	}
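	/*
	 * Different nic revisions want different interrupt masks; the
	 * per-device flags in pci_tbl's driver_data select the right one.
	 */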
	if (id->driver_data & DEV_IRQMASK_1)
		np->irqmask = NVREG_IRQMASK_WANTED_1;
	if (id->driver_data & DEV_IRQMASK_2)
		np->irqmask = NVREG_IRQMASK_WANTED_2;
	if (id->driver_data & DEV_NEED_TIMERIRQ)
		np->irqmask |= NVREG_IRQ_TIMER;
	if (id->driver_data & DEV_NEED_LINKTIMER) {
		dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
		np->need_linktimer = 1;
		np->link_timeout = jiffies + LINK_TIMEOUT;
	} else {
		dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
		np->need_linktimer = 0;
	}
	/* find a suitable phy */
	for (i = 1; i < 32; i++) {
		int id1, id2;

		spin_lock_irq(&np->lock);
		id1 = mii_rw(dev, i, MII_PHYSID1, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id1 < 0 || id1 == 0xffff)
			continue;
		spin_lock_irq(&np->lock);
		id2 = mii_rw(dev, i, MII_PHYSID2, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id2 < 0 || id2 == 0xffff)
			continue;

		id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
		id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
		dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
				pci_name(pci_dev), id1, id2, i);
		np->phyaddr = i;
		np->phy_oui = id1 | id2;
		break;
	}
	if (i == 32) {
		/* PHY in isolate mode? No phy attached and user wants to
		 * test loopback? Very odd, but can be correct.
		 */
		printk(KERN_INFO "%s: open: Could not find a valid PHY.\n",
				pci_name(pci_dev));
	}
	/* set default link speed settings */
	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
	np->duplex = 0;
	np->autoneg = 1;
	err = register_netdev(dev);
	if (err) {
		printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err);
		goto out_freering;
	}
	printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n",
			dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device,
			pci_name(pci_dev));
	return 0;

out_freering:
	pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
			np->rx_ring, np->ring_addr);
	pci_set_drvdata(pci_dev, NULL);
out_unmap:
	iounmap(get_hwbase(dev));
out_relreg:
	pci_release_regions(pci_dev);
out_disable:
	pci_disable_device(pci_dev);
out_free:
	free_netdev(dev);
out:
	return err;
}
static void __devexit nv_remove(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	unregister_netdev(dev);

	/* special op: write back the misordered MAC address - otherwise
	 * the next nv_probe would see a wrong address.
	 */
	writel(np->orig_mac[0], base + NvRegMacAddrA);
	writel(np->orig_mac[1], base + NvRegMacAddrB);

	/* free all structures */
	pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), np->rx_ring, np->ring_addr);
	iounmap(get_hwbase(dev));
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
	free_netdev(dev);
	pci_set_drvdata(pci_dev, NULL);
}
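/*
 * Supported devices: one entry per nForce ethernet variant. The
 * driver_data flags encode per-chip quirks (tx flag format, which
 * irq mask to use, whether a timer irq or link timer is needed).
 */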
static struct pci_device_id pci_tbl[] = {
	{	/* nForce Ethernet Controller */
		.vendor = PCI_VENDOR_ID_NVIDIA,
		.device = PCI_DEVICE_ID_NVIDIA_NVENET_1,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.driver_data = DEV_IRQMASK_1|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce2 Ethernet Controller */
		.vendor = PCI_VENDOR_ID_NVIDIA,
		.device = PCI_DEVICE_ID_NVIDIA_NVENET_2,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		.vendor = PCI_VENDOR_ID_NVIDIA,
		.device = PCI_DEVICE_ID_NVIDIA_NVENET_3,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		.vendor = PCI_VENDOR_ID_NVIDIA,
		.device = PCI_DEVICE_ID_NVIDIA_NVENET_4,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ,
	},
	{	/* nForce3 Ethernet Controller */
		.vendor = PCI_VENDOR_ID_NVIDIA,
		.device = PCI_DEVICE_ID_NVIDIA_NVENET_5,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ,
	},
	{	/* nForce3 Ethernet Controller */
		.vendor = PCI_VENDOR_ID_NVIDIA,
		.device = PCI_DEVICE_ID_NVIDIA_NVENET_6,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ,
	},
	{	/* nForce3 Ethernet Controller */
		.vendor = PCI_VENDOR_ID_NVIDIA,
		.device = PCI_DEVICE_ID_NVIDIA_NVENET_7,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ,
	},
	{	/* CK804 Ethernet Controller */
		.vendor = PCI_VENDOR_ID_NVIDIA,
		.device = PCI_DEVICE_ID_NVIDIA_NVENET_8,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ,
	},
	{	/* CK804 Ethernet Controller */
		.vendor = PCI_VENDOR_ID_NVIDIA,
		.device = PCI_DEVICE_ID_NVIDIA_NVENET_9,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ,
	},
	{	/* MCP04 Ethernet Controller */
		.vendor = PCI_VENDOR_ID_NVIDIA,
		.device = PCI_DEVICE_ID_NVIDIA_NVENET_10,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ,
	},
	{	/* MCP04 Ethernet Controller */
		.vendor = PCI_VENDOR_ID_NVIDIA,
		.device = PCI_DEVICE_ID_NVIDIA_NVENET_11,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ,
	},
	{	/* MCP51 Ethernet Controller */
		.vendor = PCI_VENDOR_ID_NVIDIA,
		.device = PCI_DEVICE_ID_NVIDIA_NVENET_12,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ,
	},
	{	/* MCP51 Ethernet Controller */
		.vendor = PCI_VENDOR_ID_NVIDIA,
		.device = PCI_DEVICE_ID_NVIDIA_NVENET_13,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ,
	},
	{0,},
};
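/*
 * pci_driver glue: binds pci_tbl to the probe/remove callbacks above.
 */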
static struct pci_driver driver = {
	.name = "forcedeth",
	.id_table = pci_tbl,
	.probe = nv_probe,
	.remove = __devexit_p(nv_remove),
};
static int __init init_nic(void)
{
	printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION);
	return pci_module_init(&driver);
}
static void __exit exit_nic(void)
{
	pci_unregister_driver(&driver);
}
module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");

MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");

MODULE_DEVICE_TABLE(pci, pci_tbl);

module_init(init_nic);
module_exit(exit_nic);