1 /* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
3 Written 1998-2001 by Donald Becker.
5 Current Maintainer: Roger Luethi <rl@hellgate.ch>
7 This software may be used and distributed according to the terms of
8 the GNU General Public License (GPL), incorporated herein by reference.
9 Drivers based on or derived from this code fall under the GPL and must
10 retain the authorship, copyright and license notice. This file is not
11 a complete program and may only be used when the entire operating
12 system is licensed under the GPL.
14 This driver is designed for the VIA VT86C100A Rhine-I.
15 It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
16 and management NIC 6105M).
18 The author may be reached as becker@scyld.com, or C/O
19 Scyld Computing Corporation
20 410 Severn Ave., Suite 210
24 This driver contains some changes from the original Donald Becker
25 version. He may or may not be interested in bug reports on this
26 code. You can find his versions at:
27 http://www.scyld.com/network/via-rhine.html
28 [link no longer provides useful info -jgarzik]
32 #define DRV_NAME "via-rhine"
33 #define DRV_VERSION "1.4.3"
34 #define DRV_RELDATE "2007-03-06"
37 /* A few user-configurable values.
38 These may be modified when a driver module is loaded. */
40 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
41 static int max_interrupt_work = 20;
43 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
44 Setting to > 1518 effectively disables this feature. */
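/* On the architectures listed below, unaligned memory accesses are costly or
   trap, so the default is to copy every frame; as noted in the Theory of
   Operation (IIIb/c), the copy also leaves the IP header 16-byte aligned. */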
45 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
46 || defined(CONFIG_SPARC) || defined(__ia64__) \
47 || defined(__sh__) || defined(__mips__)
48 static int rx_copybreak = 1518;
50 static int rx_copybreak;
53 /* Work-around for broken BIOSes: they are unable to get the chip back out of
54 power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
58 * In case you are looking for 'options[]' or 'full_duplex[]', they
59 * are gone. Use ethtool(8) instead.
62 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
63 The Rhine has a 64-element 8390-like hash table. */
64 static const int multicast_filter_limit = 32;
67 /* Operational parameters that are set at compile time. */
69 /* Keep the ring sizes a power of two for compile efficiency.
70 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
71 Making the Tx ring too large decreases the effectiveness of channel
72 bonding and packet priority.
73 There are no ill effects from too-large receive rings. */
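/* For example, with TX_RING_SIZE == 16 the ring wrap in rhine_start_tx(),
	entry = rp->cur_tx % TX_RING_SIZE;
   compiles down to a single mask (rp->cur_tx & 15) rather than a division,
   which is why the sizes below are kept at powers of two. */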
74 #define TX_RING_SIZE 16
75 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
76 #ifdef CONFIG_VIA_RHINE_NAPI
77 #define RX_RING_SIZE 64
79 #define RX_RING_SIZE 16
83 /* Operational parameters that usually are not changed. */
85 /* Time in jiffies before concluding the transmitter is hung. */
86 #define TX_TIMEOUT (2*HZ)
88 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */
90 #include <linux/module.h>
91 #include <linux/moduleparam.h>
92 #include <linux/kernel.h>
93 #include <linux/string.h>
94 #include <linux/timer.h>
95 #include <linux/errno.h>
96 #include <linux/ioport.h>
97 #include <linux/slab.h>
98 #include <linux/interrupt.h>
99 #include <linux/pci.h>
100 #include <linux/dma-mapping.h>
101 #include <linux/netdevice.h>
102 #include <linux/etherdevice.h>
103 #include <linux/skbuff.h>
104 #include <linux/init.h>
105 #include <linux/delay.h>
106 #include <linux/mii.h>
107 #include <linux/ethtool.h>
108 #include <linux/crc32.h>
109 #include <linux/bitops.h>
110 #include <asm/processor.h> /* Processor type for cache alignment. */
113 #include <asm/uaccess.h>
114 #include <linux/dmi.h>
116 /* These identify the driver base version and may not be removed. */
117 static char version[] __devinitdata =
118 KERN_INFO DRV_NAME ".c:v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n";
120 /* This driver was written to use PCI memory space. Some early versions
121 of the Rhine may only work correctly with I/O space accesses. */
122 #ifdef CONFIG_VIA_RHINE_MMIO
127 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
128 MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
129 MODULE_LICENSE("GPL");
131 module_param(max_interrupt_work, int, 0);
132 module_param(debug, int, 0);
133 module_param(rx_copybreak, int, 0);
134 module_param(avoid_D3, bool, 0);
135 MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
136 MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
137 MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
138 MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
143 I. Board Compatibility
145 This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet controller.
148 II. Board-specific settings
150 Boards with this chip are functional only in a bus-master PCI slot.
152 Many operational settings are loaded from the EEPROM to the Config word at
153 offset 0x78. For most of these settings, this driver assumes that they are correct.
155 If this driver is compiled to use PCI memory space operations the EEPROM
156 must be configured to enable memory ops.
158 III. Driver operation
162 This driver uses two statically allocated fixed-size descriptor lists
163 formed into rings by a branch from the final descriptor to the beginning of
164 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
166 IIIb/c. Transmit/Receive Structure
168 This driver attempts to use a zero-copy receive and transmit scheme.
170 Alas, all data buffers are required to start on a 32 bit boundary, so
171 the driver must often copy transmit packets into bounce buffers.
173 The driver allocates full frame size skbuffs for the Rx ring buffers at
174 open() time and passes the skb->data field to the chip as receive data
175 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
176 a fresh skbuff is allocated and the frame is copied to the new skbuff.
177 When the incoming frame is larger, the skbuff is passed directly up the
178 protocol stack. Buffers consumed this way are replaced by newly allocated
179 skbuffs in the last phase of rhine_rx().
181 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
182 using a full-sized skbuff for small frames vs. the copying costs of larger
183 frames. New boards are typically used in generously configured machines
184 and the underfilled buffers have negligible impact compared to the benefit of
185 a single allocation size, so the default value of zero results in never
186 copying packets. When copying is done, the cost is usually mitigated by using
187 a combined copy/checksum routine. Copying also preloads the cache, which is
188 most useful with small frames.
190 Since the VIA chips are only able to transfer data to buffers on 32 bit
191 boundaries, the IP header at offset 14 in an ethernet frame isn't
192 longword aligned for further processing. Copying these unaligned buffers
193 has the beneficial effect of 16-byte aligning the IP header.
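In outline, rhine_rx() below makes the copy decision roughly like this
(pkt_len and entry are the local names it uses):

	if (pkt_len < rx_copybreak &&
	    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
		skb_reserve(skb, 2);	// 16-byte align the IP header
		// copy the frame; the ring skbuff stays where it is
	} else {
		// hand the full-sized ring skbuff up the stack; a
		// replacement is allocated later in the refill loop
	}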
195 IIId. Synchronization
197 The driver runs as two independent, single-threaded flows of control. One
198 is the send-packet routine, which enforces single-threaded use by the
199 dev->priv->lock spinlock. The other thread is the interrupt handler, which
200 is single threaded by the hardware and interrupt handling software.
202 The send packet thread has partial control over the Tx ring. It locks the
203 dev->priv->lock whenever it's queuing a Tx packet. If the next slot in the ring
204 is not available it stops the transmit queue by calling netif_stop_queue.
206 The interrupt handler has exclusive control over the Rx ring and records stats
207 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
208 empty by incrementing the dirty_tx mark. If enough entries in the Tx ring
209 have been reclaimed, the transmit queue is woken up if it was stopped.
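In code terms (see rhine_start_tx() and rhine_tx() below), cur_tx is the
producer index and dirty_tx the consumer index; a rough sketch:

	// send side (rhine_start_tx), under rp->lock:
	entry = rp->cur_tx % TX_RING_SIZE;
	// fill rp->tx_ring[entry], hand it to the chip, then:
	rp->cur_tx++;
	if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
		netif_stop_queue(dev);

	// interrupt side (rhine_tx), also under rp->lock:
	while (rp->dirty_tx != rp->cur_tx) {
		// reap status, free the skb, then advance:
		entry = (++rp->dirty_tx) % TX_RING_SIZE;
	}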
215 Preliminary VT86C100A manual from http://www.via.com.tw/
216 http://www.scyld.com/expert/100mbps.html
217 http://www.scyld.com/expert/NWay.html
218 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
219 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
224 The VT86C100A manual is not a reliable source of information.
225 The 3043 chip does not handle unaligned transmit or receive buffers, resulting
226 in significant performance degradation for bounce buffer copies on transmit
227 and unaligned IP headers on receive.
228 The chip does not pad to minimum transmit length.
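Because of this, rhine_start_tx() below pads short frames to ETH_ZLEN in
software: skb_padto() in the normal path, plus a memset() of the tail when
the Rhine-I alignment bounce buffer is used (padding is not copied there).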
233 /* This table drives the PCI probe routines. It's mostly boilerplate in all
234 of the drivers, and will likely be provided by some future kernel.
235 Note the matching code -- the first table entry matches all 56** cards but
236 the second matches only the 1234 card.
243 VT8231 = 0x50, /* Integrated MAC */
244 VT8233 = 0x60, /* Integrated MAC */
245 VT8235 = 0x74, /* Integrated MAC */
246 VT8237 = 0x78, /* Integrated MAC */
253 VT6105M = 0x90, /* Management adapter */
257 rqWOL = 0x0001, /* Wake-On-LAN support */
258 rqForceReset = 0x0002,
259 rq6patterns = 0x0040, /* 6 instead of 4 patterns for WOL */
260 rqStatusWBRace = 0x0080, /* Tx Status Writeback Error possible */
261 rqRhineI = 0x0100, /* See comment below */
264 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
265 * MMIO as well as for the collision counter and the Tx FIFO underflow
266 * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
269 /* Beware of PCI posted writes */
270 #define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)
272 static const struct pci_device_id rhine_pci_tbl[] = {
273 { 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, }, /* VT86C100A */
274 { 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6102 */
275 { 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, }, /* 6105{,L,LOM} */
276 { 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6105M */
277 { } /* terminate list */
279 MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
282 /* Offsets to the device registers. */
283 enum register_offsets {
284 StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
286 IntrStatus=0x0C, IntrEnable=0x0E,
287 MulticastFilter0=0x10, MulticastFilter1=0x14,
288 RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
289 MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
290 MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
291 ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
292 RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
293 StickyHW=0x83, IntrStatus2=0x84,
294 WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
295 WOLcrClr1=0xA6, WOLcgClr=0xA7,
296 PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
299 /* Bits in ConfigD */
301 BackOptional=0x01, BackModify=0x02,
302 BackCaptureEffect=0x04, BackRandom=0x08
306 /* Registers we check to verify that MMIO and PIO accesses read back the same values. */
307 static const int mmio_verify_registers[] = {
308 RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
313 /* Bits in the interrupt status/mask registers. */
314 enum intr_status_bits {
315 IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
316 IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
318 IntrStatsMax=0x0080, IntrRxEarly=0x0100,
319 IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
320 IntrTxAborted=0x2000, IntrLinkChange=0x4000,
322 IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
323 IntrTxDescRace=0x080000, /* mapped from IntrStatus2 */
324 IntrTxErrSummary=0x082218,
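	/* IntrTxErrSummary is the OR of IntrTxDescRace, IntrTxAborted,
	   IntrTxUnderrun and IntrTxError. */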
327 /* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
336 /* The Rx and Tx buffer descriptors. */
339 u32 desc_length; /* Chain flag, Buffer/frame length */
345 u32 desc_length; /* Chain flag, Tx Config, Frame length */
350 /* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
351 #define TXDESC 0x00e08000
353 enum rx_status_bits {
354 RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
357 /* Bits in *_desc.*_status */
358 enum desc_status_bits {
362 /* Bits in ChipCmd. */
364 CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
365 CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
366 Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
367 Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
370 struct rhine_private {
371 /* Descriptor rings */
372 struct rx_desc *rx_ring;
373 struct tx_desc *tx_ring;
374 dma_addr_t rx_ring_dma;
375 dma_addr_t tx_ring_dma;
377 /* The addresses of receive-in-place skbuffs. */
378 struct sk_buff *rx_skbuff[RX_RING_SIZE];
379 dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
381 /* The saved address of a sent-in-place packet/buffer, for later free(). */
382 struct sk_buff *tx_skbuff[TX_RING_SIZE];
383 dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
385 /* Tx bounce buffers (Rhine-I only) */
386 unsigned char *tx_buf[TX_RING_SIZE];
387 unsigned char *tx_bufs;
388 dma_addr_t tx_bufs_dma;
390 struct pci_dev *pdev;
392 struct net_device *dev;
393 struct napi_struct napi;
394 struct net_device_stats stats;
397 /* Frequently used values: keep some adjacent for cache effect. */
399 struct rx_desc *rx_head_desc;
400 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
401 unsigned int cur_tx, dirty_tx;
402 unsigned int rx_buf_sz; /* Based on MTU+slack. */
405 u8 tx_thresh, rx_thresh;
407 struct mii_if_info mii_if;
411 static int mdio_read(struct net_device *dev, int phy_id, int location);
412 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
413 static int rhine_open(struct net_device *dev);
414 static void rhine_tx_timeout(struct net_device *dev);
415 static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
416 static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
417 static void rhine_tx(struct net_device *dev);
418 static int rhine_rx(struct net_device *dev, int limit);
419 static void rhine_error(struct net_device *dev, int intr_status);
420 static void rhine_set_rx_mode(struct net_device *dev);
421 static struct net_device_stats *rhine_get_stats(struct net_device *dev);
422 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
423 static const struct ethtool_ops netdev_ethtool_ops;
424 static int rhine_close(struct net_device *dev);
425 static void rhine_shutdown (struct pci_dev *pdev);
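/*
 * RHINE_WAIT_FOR(condition) below polls 'condition' up to 1024 times and,
 * with debug > 1, reports how many polls were needed whenever more than
 * half of that budget was used. Typical use:
 *	RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
 */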
427 #define RHINE_WAIT_FOR(condition) do { \
429 while (!(condition) && --i) \
431 if (debug > 1 && i < 512) \
432 printk(KERN_INFO "%s: %4d cycles used @ %s:%d\n", \
433 DRV_NAME, 1024-i, __func__, __LINE__); \
436 static inline u32 get_intr_status(struct net_device *dev)
438 struct rhine_private *rp = netdev_priv(dev);
439 void __iomem *ioaddr = rp->base;
442 intr_status = ioread16(ioaddr + IntrStatus);
443 /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
444 if (rp->quirks & rqStatusWBRace)
445 intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
450 * Get power related registers into sane state.
451 * Notify user about past WOL event.
453 static void rhine_power_init(struct net_device *dev)
455 struct rhine_private *rp = netdev_priv(dev);
456 void __iomem *ioaddr = rp->base;
459 if (rp->quirks & rqWOL) {
460 /* Make sure chip is in power state D0 */
461 iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
463 /* Disable "force PME-enable" */
464 iowrite8(0x80, ioaddr + WOLcgClr);
466 /* Clear power-event config bits (WOL) */
467 iowrite8(0xFF, ioaddr + WOLcrClr);
468 /* More recent cards can manage two additional patterns */
469 if (rp->quirks & rq6patterns)
470 iowrite8(0x03, ioaddr + WOLcrClr1);
472 /* Save power-event status bits */
473 wolstat = ioread8(ioaddr + PwrcsrSet);
474 if (rp->quirks & rq6patterns)
475 wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
477 /* Clear power-event status bits */
478 iowrite8(0xFF, ioaddr + PwrcsrClr);
479 if (rp->quirks & rq6patterns)
480 iowrite8(0x03, ioaddr + PwrcsrClr1);
486 reason = "Magic packet";
489 reason = "Link went up";
492 reason = "Link went down";
495 reason = "Unicast packet";
498 reason = "Multicast/broadcast packet";
503 printk(KERN_INFO "%s: Woke system up. Reason: %s.\n",
509 static void rhine_chip_reset(struct net_device *dev)
511 struct rhine_private *rp = netdev_priv(dev);
512 void __iomem *ioaddr = rp->base;
514 iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
517 if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
518 printk(KERN_INFO "%s: Reset not complete yet. "
519 "Trying harder.\n", DRV_NAME);
522 if (rp->quirks & rqForceReset)
523 iowrite8(0x40, ioaddr + MiscCmd);
525 /* Reset can take somewhat longer (rare) */
526 RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
530 printk(KERN_INFO "%s: Reset %s.\n", dev->name,
531 (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
532 "failed" : "succeeded");
536 static void enable_mmio(long pioaddr, u32 quirks)
539 if (quirks & rqRhineI) {
540 /* More recent docs say that this bit is reserved ... */
541 n = inb(pioaddr + ConfigA) | 0x20;
542 outb(n, pioaddr + ConfigA);
544 n = inb(pioaddr + ConfigD) | 0x80;
545 outb(n, pioaddr + ConfigD);
551 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
552 * (plus 0x6C for Rhine-I/II)
554 static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
556 struct rhine_private *rp = netdev_priv(dev);
557 void __iomem *ioaddr = rp->base;
559 outb(0x20, pioaddr + MACRegEEcsr);
560 RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));
564 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
565 * MMIO. If reloading EEPROM was done first this could be avoided, but
566 * it is not known if that still works with the "win98-reboot" problem.
568 enable_mmio(pioaddr, rp->quirks);
571 /* Turn off EEPROM-controlled wake-up (magic packet) */
572 if (rp->quirks & rqWOL)
573 iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
577 #ifdef CONFIG_NET_POLL_CONTROLLER
578 static void rhine_poll(struct net_device *dev)
580 disable_irq(dev->irq);
581 rhine_interrupt(dev->irq, (void *)dev);
582 enable_irq(dev->irq);
586 #ifdef CONFIG_VIA_RHINE_NAPI
587 static int rhine_napipoll(struct napi_struct *napi, int budget)
589 struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
590 struct net_device *dev = rp->dev;
591 void __iomem *ioaddr = rp->base;
594 work_done = rhine_rx(dev, budget);
596 if (work_done < budget) {
597 netif_rx_complete(dev, napi);
599 iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty | IntrRxOverflow |
600 IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
601 IntrTxDone | IntrTxError | IntrTxUnderrun |
602 IntrPCIErr | IntrStatsMax | IntrLinkChange,
603 ioaddr + IntrEnable);
609 static void rhine_hw_init(struct net_device *dev, long pioaddr)
611 struct rhine_private *rp = netdev_priv(dev);
613 /* Reset the chip to erase previous misconfiguration. */
614 rhine_chip_reset(dev);
616 /* Rhine-I needs extra time to recuperate before EEPROM reload */
617 if (rp->quirks & rqRhineI)
620 /* Reload EEPROM controlled bytes cleared by soft reset */
621 rhine_reload_eeprom(pioaddr, dev);
624 static int __devinit rhine_init_one(struct pci_dev *pdev,
625 const struct pci_device_id *ent)
627 struct net_device *dev;
628 struct rhine_private *rp;
633 void __iomem *ioaddr;
642 /* when built into the kernel, we only print version if device is found */
644 static int printed_version;
645 if (!printed_version++)
653 if (pdev->revision < VTunknown0) {
657 else if (pdev->revision >= VT6102) {
658 quirks = rqWOL | rqForceReset;
659 if (pdev->revision < VT6105) {
661 quirks |= rqStatusWBRace; /* Rhine-II exclusive */
664 phy_id = 1; /* Integrated PHY, phy_id fixed to 1 */
665 if (pdev->revision >= VT6105_B0)
666 quirks |= rq6patterns;
667 if (pdev->revision < VT6105M)
670 name = "Rhine III (Management Adapter)";
674 rc = pci_enable_device(pdev);
678 /* this should always be supported */
679 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
681 printk(KERN_ERR "32-bit PCI DMA addresses not supported by "
687 if ((pci_resource_len(pdev, 0) < io_size) ||
688 (pci_resource_len(pdev, 1) < io_size)) {
690 printk(KERN_ERR "Insufficient PCI resources, aborting\n");
694 pioaddr = pci_resource_start(pdev, 0);
695 memaddr = pci_resource_start(pdev, 1);
697 pci_set_master(pdev);
699 dev = alloc_etherdev(sizeof(struct rhine_private));
702 printk(KERN_ERR "alloc_etherdev failed\n");
705 SET_MODULE_OWNER(dev);
706 SET_NETDEV_DEV(dev, &pdev->dev);
708 rp = netdev_priv(dev);
711 rp->pioaddr = pioaddr;
714 rc = pci_request_regions(pdev, DRV_NAME);
716 goto err_out_free_netdev;
718 ioaddr = pci_iomap(pdev, bar, io_size);
721 printk(KERN_ERR "ioremap failed for device %s, region 0x%X "
722 "@ 0x%lX\n", pci_name(pdev), io_size, memaddr);
723 goto err_out_free_res;
727 enable_mmio(pioaddr, quirks);
729 /* Check that selected MMIO registers match the PIO ones */
731 while (mmio_verify_registers[i]) {
732 int reg = mmio_verify_registers[i++];
733 unsigned char a = inb(pioaddr+reg);
734 unsigned char b = readb(ioaddr+reg);
737 printk(KERN_ERR "MMIO does not match PIO [%02x] "
738 "(%02x != %02x)\n", reg, a, b);
742 #endif /* USE_MMIO */
744 dev->base_addr = (unsigned long)ioaddr;
747 /* Get chip registers into a sane state */
748 rhine_power_init(dev);
749 rhine_hw_init(dev, pioaddr);
751 for (i = 0; i < 6; i++)
752 dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
753 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
755 if (!is_valid_ether_addr(dev->perm_addr)) {
757 printk(KERN_ERR "Invalid MAC address\n");
761 /* For Rhine-I/II, phy_id is loaded from EEPROM */
763 phy_id = ioread8(ioaddr + 0x6C);
765 dev->irq = pdev->irq;
767 spin_lock_init(&rp->lock);
768 rp->mii_if.dev = dev;
769 rp->mii_if.mdio_read = mdio_read;
770 rp->mii_if.mdio_write = mdio_write;
771 rp->mii_if.phy_id_mask = 0x1f;
772 rp->mii_if.reg_num_mask = 0x1f;
774 /* The chip-specific entries in the device structure. */
775 dev->open = rhine_open;
776 dev->hard_start_xmit = rhine_start_tx;
777 dev->stop = rhine_close;
778 dev->get_stats = rhine_get_stats;
779 dev->set_multicast_list = rhine_set_rx_mode;
780 dev->do_ioctl = netdev_ioctl;
781 dev->ethtool_ops = &netdev_ethtool_ops;
782 dev->tx_timeout = rhine_tx_timeout;
783 dev->watchdog_timeo = TX_TIMEOUT;
784 #ifdef CONFIG_NET_POLL_CONTROLLER
785 dev->poll_controller = rhine_poll;
787 #ifdef CONFIG_VIA_RHINE_NAPI
788 netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
790 if (rp->quirks & rqRhineI)
791 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
793 /* dev->name not defined before register_netdev()! */
794 rc = register_netdev(dev);
798 printk(KERN_INFO "%s: VIA %s at 0x%lx, ",
807 for (i = 0; i < 5; i++)
808 printk("%2.2x:", dev->dev_addr[i]);
809 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], pdev->irq);
811 pci_set_drvdata(pdev, dev);
815 int mii_status = mdio_read(dev, phy_id, 1);
816 mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
817 mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
818 if (mii_status != 0xffff && mii_status != 0x0000) {
819 rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
820 printk(KERN_INFO "%s: MII PHY found at address "
821 "%d, status 0x%4.4x advertising %4.4x "
822 "Link %4.4x.\n", dev->name, phy_id,
823 mii_status, rp->mii_if.advertising,
824 mdio_read(dev, phy_id, 5));
826 /* set IFF_RUNNING */
827 if (mii_status & BMSR_LSTATUS)
828 netif_carrier_on(dev);
830 netif_carrier_off(dev);
834 rp->mii_if.phy_id = phy_id;
835 if (debug > 1 && avoid_D3)
836 printk(KERN_INFO "%s: No D3 power state at shutdown.\n",
842 pci_iounmap(pdev, ioaddr);
844 pci_release_regions(pdev);
851 static int alloc_ring(struct net_device* dev)
853 struct rhine_private *rp = netdev_priv(dev);
857 ring = pci_alloc_consistent(rp->pdev,
858 RX_RING_SIZE * sizeof(struct rx_desc) +
859 TX_RING_SIZE * sizeof(struct tx_desc),
862 printk(KERN_ERR "Could not allocate DMA memory.\n");
865 if (rp->quirks & rqRhineI) {
866 rp->tx_bufs = pci_alloc_consistent(rp->pdev,
867 PKT_BUF_SZ * TX_RING_SIZE,
869 if (rp->tx_bufs == NULL) {
870 pci_free_consistent(rp->pdev,
871 RX_RING_SIZE * sizeof(struct rx_desc) +
872 TX_RING_SIZE * sizeof(struct tx_desc),
879 rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
880 rp->rx_ring_dma = ring_dma;
881 rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
886 static void free_ring(struct net_device* dev)
888 struct rhine_private *rp = netdev_priv(dev);
890 pci_free_consistent(rp->pdev,
891 RX_RING_SIZE * sizeof(struct rx_desc) +
892 TX_RING_SIZE * sizeof(struct tx_desc),
893 rp->rx_ring, rp->rx_ring_dma);
897 pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
898 rp->tx_bufs, rp->tx_bufs_dma);
904 static void alloc_rbufs(struct net_device *dev)
906 struct rhine_private *rp = netdev_priv(dev);
910 rp->dirty_rx = rp->cur_rx = 0;
912 rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
913 rp->rx_head_desc = &rp->rx_ring[0];
914 next = rp->rx_ring_dma;
916 /* Init the ring entries */
917 for (i = 0; i < RX_RING_SIZE; i++) {
918 rp->rx_ring[i].rx_status = 0;
919 rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
920 next += sizeof(struct rx_desc);
921 rp->rx_ring[i].next_desc = cpu_to_le32(next);
922 rp->rx_skbuff[i] = NULL;
924 /* Mark the last entry as wrapping the ring. */
925 rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
927 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
928 for (i = 0; i < RX_RING_SIZE; i++) {
929 struct sk_buff *skb = dev_alloc_skb(rp->rx_buf_sz);
930 rp->rx_skbuff[i] = skb;
933 skb->dev = dev; /* Mark as being used by this device. */
935 rp->rx_skbuff_dma[i] =
936 pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
939 rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
940 rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
942 rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
945 static void free_rbufs(struct net_device* dev)
947 struct rhine_private *rp = netdev_priv(dev);
950 /* Free all the skbuffs in the Rx queue. */
951 for (i = 0; i < RX_RING_SIZE; i++) {
952 rp->rx_ring[i].rx_status = 0;
953 rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
954 if (rp->rx_skbuff[i]) {
955 pci_unmap_single(rp->pdev,
956 rp->rx_skbuff_dma[i],
957 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
958 dev_kfree_skb(rp->rx_skbuff[i]);
960 rp->rx_skbuff[i] = NULL;
964 static void alloc_tbufs(struct net_device* dev)
966 struct rhine_private *rp = netdev_priv(dev);
970 rp->dirty_tx = rp->cur_tx = 0;
971 next = rp->tx_ring_dma;
972 for (i = 0; i < TX_RING_SIZE; i++) {
973 rp->tx_skbuff[i] = NULL;
974 rp->tx_ring[i].tx_status = 0;
975 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
976 next += sizeof(struct tx_desc);
977 rp->tx_ring[i].next_desc = cpu_to_le32(next);
978 if (rp->quirks & rqRhineI)
979 rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
981 rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
985 static void free_tbufs(struct net_device* dev)
987 struct rhine_private *rp = netdev_priv(dev);
990 for (i = 0; i < TX_RING_SIZE; i++) {
991 rp->tx_ring[i].tx_status = 0;
992 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
993 rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
994 if (rp->tx_skbuff[i]) {
995 if (rp->tx_skbuff_dma[i]) {
996 pci_unmap_single(rp->pdev,
997 rp->tx_skbuff_dma[i],
998 rp->tx_skbuff[i]->len,
1001 dev_kfree_skb(rp->tx_skbuff[i]);
1003 rp->tx_skbuff[i] = NULL;
1004 rp->tx_buf[i] = NULL;
1008 static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1010 struct rhine_private *rp = netdev_priv(dev);
1011 void __iomem *ioaddr = rp->base;
1013 mii_check_media(&rp->mii_if, debug, init_media);
1015 if (rp->mii_if.full_duplex)
1016 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1019 iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1022 printk(KERN_INFO "%s: force_media %d, carrier %d\n", dev->name,
1023 rp->mii_if.force_media, netif_carrier_ok(dev));
1026 /* Called after status of force_media possibly changed */
1027 static void rhine_set_carrier(struct mii_if_info *mii)
1029 if (mii->force_media) {
1030 /* autoneg is off: Link is always assumed to be up */
1031 if (!netif_carrier_ok(mii->dev))
1032 netif_carrier_on(mii->dev);
1034 else /* Let MII library update carrier status */
1035 rhine_check_media(mii->dev, 0);
1037 printk(KERN_INFO "%s: force_media %d, carrier %d\n",
1038 mii->dev->name, mii->force_media,
1039 netif_carrier_ok(mii->dev));
1042 static void init_registers(struct net_device *dev)
1044 struct rhine_private *rp = netdev_priv(dev);
1045 void __iomem *ioaddr = rp->base;
1048 for (i = 0; i < 6; i++)
1049 iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1051 /* Initialize other registers. */
1052 iowrite16(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */
1053 /* Configure initial FIFO thresholds. */
1054 iowrite8(0x20, ioaddr + TxConfig);
1055 rp->tx_thresh = 0x20;
1056 rp->rx_thresh = 0x60; /* Written in rhine_set_rx_mode(). */
1058 iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1059 iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1061 rhine_set_rx_mode(dev);
1063 #ifdef CONFIG_VIA_RHINE_NAPI
1064 napi_enable(&rp->napi);
1067 /* Enable interrupts by setting the interrupt mask. */
1068 iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty | IntrRxOverflow |
1069 IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
1070 IntrTxDone | IntrTxError | IntrTxUnderrun |
1071 IntrPCIErr | IntrStatsMax | IntrLinkChange,
1072 ioaddr + IntrEnable);
1074 iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1076 rhine_check_media(dev, 1);
1079 /* Enable MII link status auto-polling (required for IntrLinkChange) */
1080 static void rhine_enable_linkmon(void __iomem *ioaddr)
1082 iowrite8(0, ioaddr + MIICmd);
1083 iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1084 iowrite8(0x80, ioaddr + MIICmd);
1086 RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20));
1088 iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
1091 /* Disable MII link status auto-polling (required for MDIO access) */
1092 static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
1094 iowrite8(0, ioaddr + MIICmd);
1096 if (quirks & rqRhineI) {
1097 iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR
1099 /* Can be called from ISR. Evil. */
1102 /* 0x80 must be set immediately before turning it off */
1103 iowrite8(0x80, ioaddr + MIICmd);
1105 RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);
1107 /* Heh. Now clear 0x80 again. */
1108 iowrite8(0, ioaddr + MIICmd);
1111 RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80);
1114 /* Read and write over the MII Management Data I/O (MDIO) interface. */
1116 static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1118 struct rhine_private *rp = netdev_priv(dev);
1119 void __iomem *ioaddr = rp->base;
1122 rhine_disable_linkmon(ioaddr, rp->quirks);
1124 /* rhine_disable_linkmon already cleared MIICmd */
1125 iowrite8(phy_id, ioaddr + MIIPhyAddr);
1126 iowrite8(regnum, ioaddr + MIIRegAddr);
1127 iowrite8(0x40, ioaddr + MIICmd); /* Trigger read */
1128 RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40));
1129 result = ioread16(ioaddr + MIIData);
1131 rhine_enable_linkmon(ioaddr);
1135 static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1137 struct rhine_private *rp = netdev_priv(dev);
1138 void __iomem *ioaddr = rp->base;
1140 rhine_disable_linkmon(ioaddr, rp->quirks);
1142 /* rhine_disable_linkmon already cleared MIICmd */
1143 iowrite8(phy_id, ioaddr + MIIPhyAddr);
1144 iowrite8(regnum, ioaddr + MIIRegAddr);
1145 iowrite16(value, ioaddr + MIIData);
1146 iowrite8(0x20, ioaddr + MIICmd); /* Trigger write */
1147 RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20));
1149 rhine_enable_linkmon(ioaddr);
1152 static int rhine_open(struct net_device *dev)
1154 struct rhine_private *rp = netdev_priv(dev);
1155 void __iomem *ioaddr = rp->base;
1158 rc = request_irq(rp->pdev->irq, &rhine_interrupt, IRQF_SHARED, dev->name,
1164 printk(KERN_DEBUG "%s: rhine_open() irq %d.\n",
1165 dev->name, rp->pdev->irq);
1167 rc = alloc_ring(dev);
1169 free_irq(rp->pdev->irq, dev);
1174 rhine_chip_reset(dev);
1175 init_registers(dev);
1177 printk(KERN_DEBUG "%s: Done rhine_open(), status %4.4x "
1178 "MII status: %4.4x.\n",
1179 dev->name, ioread16(ioaddr + ChipCmd),
1180 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1182 netif_start_queue(dev);
1187 static void rhine_tx_timeout(struct net_device *dev)
1189 struct rhine_private *rp = netdev_priv(dev);
1190 void __iomem *ioaddr = rp->base;
1192 printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
1193 "%4.4x, resetting...\n",
1194 dev->name, ioread16(ioaddr + IntrStatus),
1195 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1197 /* protect against concurrent rx interrupts */
1198 disable_irq(rp->pdev->irq);
1200 #ifdef CONFIG_VIA_RHINE_NAPI
1201 napi_disable(&rp->napi);
1204 spin_lock(&rp->lock);
1206 /* clear all descriptors */
1212 /* Reinitialize the hardware. */
1213 rhine_chip_reset(dev);
1214 init_registers(dev);
1216 spin_unlock(&rp->lock);
1217 enable_irq(rp->pdev->irq);
1219 dev->trans_start = jiffies;
1220 rp->stats.tx_errors++;
1221 netif_wake_queue(dev);
1224 static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
1226 struct rhine_private *rp = netdev_priv(dev);
1227 void __iomem *ioaddr = rp->base;
1230 /* Caution: the write order is important here, set the field
1231 with the "ownership" bits last. */
1233 /* Calculate the next Tx descriptor entry. */
1234 entry = rp->cur_tx % TX_RING_SIZE;
1236 if (skb_padto(skb, ETH_ZLEN))
1239 rp->tx_skbuff[entry] = skb;
1241 if ((rp->quirks & rqRhineI) &&
1242 (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
1243 /* Must use alignment buffer. */
1244 if (skb->len > PKT_BUF_SZ) {
1245 /* packet too long, drop it */
1247 rp->tx_skbuff[entry] = NULL;
1248 rp->stats.tx_dropped++;
1252 /* Padding is not copied and so must be redone. */
1253 skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1254 if (skb->len < ETH_ZLEN)
1255 memset(rp->tx_buf[entry] + skb->len, 0,
1256 ETH_ZLEN - skb->len);
1257 rp->tx_skbuff_dma[entry] = 0;
1258 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1259 (rp->tx_buf[entry] -
1262 rp->tx_skbuff_dma[entry] =
1263 pci_map_single(rp->pdev, skb->data, skb->len,
1265 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1268 rp->tx_ring[entry].desc_length =
1269 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1272 spin_lock_irq(&rp->lock);
1274 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1279 /* Non-x86 Todo: explicitly flush cache lines here. */
1281 /* Wake the potentially-idle transmit channel */
1282 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1286 if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
1287 netif_stop_queue(dev);
1289 dev->trans_start = jiffies;
1291 spin_unlock_irq(&rp->lock);
1294 printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
1295 dev->name, rp->cur_tx-1, entry);
1300 /* The interrupt handler does all of the Rx thread work and cleans up
1301 after the Tx thread. */
1302 static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1304 struct net_device *dev = dev_instance;
1305 struct rhine_private *rp = netdev_priv(dev);
1306 void __iomem *ioaddr = rp->base;
1308 int boguscnt = max_interrupt_work;
1311 while ((intr_status = get_intr_status(dev))) {
1314 /* Acknowledge all of the current interrupt sources ASAP. */
1315 if (intr_status & IntrTxDescRace)
1316 iowrite8(0x08, ioaddr + IntrStatus2);
1317 iowrite16(intr_status & 0xffff, ioaddr + IntrStatus);
1321 printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
1322 dev->name, intr_status);
1324 if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
1325 IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) {
1326 #ifdef CONFIG_VIA_RHINE_NAPI
1327 iowrite16(IntrTxAborted |
1328 IntrTxDone | IntrTxError | IntrTxUnderrun |
1329 IntrPCIErr | IntrStatsMax | IntrLinkChange,
1330 ioaddr + IntrEnable);
1332 netif_rx_schedule(dev, &rp->napi);
1334 rhine_rx(dev, RX_RING_SIZE);
1338 if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
1339 if (intr_status & IntrTxErrSummary) {
1340 /* Avoid scavenging before Tx engine turned off */
1341 RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
1343 ioread8(ioaddr+ChipCmd) & CmdTxOn)
1344 printk(KERN_WARNING "%s: "
1345 "rhine_interrupt() Tx engine "
1346 "still on.\n", dev->name);
1351 /* Abnormal error summary/uncommon events handlers. */
1352 if (intr_status & (IntrPCIErr | IntrLinkChange |
1353 IntrStatsMax | IntrTxError | IntrTxAborted |
1354 IntrTxUnderrun | IntrTxDescRace))
1355 rhine_error(dev, intr_status);
1357 if (--boguscnt < 0) {
1358 printk(KERN_WARNING "%s: Too much work at interrupt, "
1360 dev->name, intr_status);
1366 printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n",
1367 dev->name, ioread16(ioaddr + IntrStatus));
1368 return IRQ_RETVAL(handled);
1371 /* This routine is logically part of the interrupt handler, but isolated
1373 static void rhine_tx(struct net_device *dev)
1375 struct rhine_private *rp = netdev_priv(dev);
1376 int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
1378 spin_lock(&rp->lock);
1380 /* find and clean up dirty Tx descriptors */
1381 while (rp->dirty_tx != rp->cur_tx) {
1382 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1384 printk(KERN_DEBUG "Tx scavenge %d status %8.8x.\n",
1386 if (txstatus & DescOwn)
1388 if (txstatus & 0x8000) {
1390 printk(KERN_DEBUG "%s: Transmit error, "
1391 "Tx status %8.8x.\n",
1392 dev->name, txstatus);
1393 rp->stats.tx_errors++;
1394 if (txstatus & 0x0400) rp->stats.tx_carrier_errors++;
1395 if (txstatus & 0x0200) rp->stats.tx_window_errors++;
1396 if (txstatus & 0x0100) rp->stats.tx_aborted_errors++;
1397 if (txstatus & 0x0080) rp->stats.tx_heartbeat_errors++;
1398 if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1399 (txstatus & 0x0800) || (txstatus & 0x1000)) {
1400 rp->stats.tx_fifo_errors++;
1401 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1402 break; /* Keep the skb - we try again */
1404 /* Transmitter restarted in 'abnormal' handler. */
1406 if (rp->quirks & rqRhineI)
1407 rp->stats.collisions += (txstatus >> 3) & 0x0F;
1409 rp->stats.collisions += txstatus & 0x0F;
1411 printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n",
1412 (txstatus >> 3) & 0xF,
1414 rp->stats.tx_bytes += rp->tx_skbuff[entry]->len;
1415 rp->stats.tx_packets++;
1417 /* Free the original skb. */
1418 if (rp->tx_skbuff_dma[entry]) {
1419 pci_unmap_single(rp->pdev,
1420 rp->tx_skbuff_dma[entry],
1421 rp->tx_skbuff[entry]->len,
1424 dev_kfree_skb_irq(rp->tx_skbuff[entry]);
1425 rp->tx_skbuff[entry] = NULL;
1426 entry = (++rp->dirty_tx) % TX_RING_SIZE;
1428 if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
1429 netif_wake_queue(dev);
1431 spin_unlock(&rp->lock);
1434 /* Process up to limit frames from receive ring */
1435 static int rhine_rx(struct net_device *dev, int limit)
1437 struct rhine_private *rp = netdev_priv(dev);
1439 int entry = rp->cur_rx % RX_RING_SIZE;
1442 printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n",
1444 le32_to_cpu(rp->rx_head_desc->rx_status));
1447 /* Process descriptors the chip has released (DescOwn clear) and send each complete packet up. */
1448 for (count = 0; count < limit; ++count) {
1449 struct rx_desc *desc = rp->rx_head_desc;
1450 u32 desc_status = le32_to_cpu(desc->rx_status);
1451 int data_size = desc_status >> 16;
1453 if (desc_status & DescOwn)
1457 printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n",
1460 if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
1461 if ((desc_status & RxWholePkt) != RxWholePkt) {
1462 printk(KERN_WARNING "%s: Oversized Ethernet "
1463 "frame spanned multiple buffers, entry "
1464 "%#x length %d status %8.8x!\n",
1465 dev->name, entry, data_size,
1467 printk(KERN_WARNING "%s: Oversized Ethernet "
1468 "frame %p vs %p.\n", dev->name,
1469 rp->rx_head_desc, &rp->rx_ring[entry]);
1470 rp->stats.rx_length_errors++;
1471 } else if (desc_status & RxErr) {
1472 /* There was an error. */
1474 printk(KERN_DEBUG "rhine_rx() Rx "
1475 "error was %8.8x.\n",
1477 rp->stats.rx_errors++;
1478 if (desc_status & 0x0030) rp->stats.rx_length_errors++;
1479 if (desc_status & 0x0048) rp->stats.rx_fifo_errors++;
1480 if (desc_status & 0x0004) rp->stats.rx_frame_errors++;
1481 if (desc_status & 0x0002) {
1482 /* this can also be updated outside the interrupt handler */
1483 spin_lock(&rp->lock);
1484 rp->stats.rx_crc_errors++;
1485 spin_unlock(&rp->lock);
1489 struct sk_buff *skb;
1490 /* Length should omit the CRC */
1491 int pkt_len = data_size - 4;
1493 /* Check if the packet is long enough to accept without
1494 copying to a minimally-sized skbuff. */
1495 if (pkt_len < rx_copybreak &&
1496 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1497 skb_reserve(skb, 2); /* 16 byte align the IP header */
1498 pci_dma_sync_single_for_cpu(rp->pdev,
1499 rp->rx_skbuff_dma[entry],
1501 PCI_DMA_FROMDEVICE);
1503 skb_copy_to_linear_data(skb,
1504 rp->rx_skbuff[entry]->data,
1506 skb_put(skb, pkt_len);
1507 pci_dma_sync_single_for_device(rp->pdev,
1508 rp->rx_skbuff_dma[entry],
1510 PCI_DMA_FROMDEVICE);
1512 skb = rp->rx_skbuff[entry];
1514 printk(KERN_ERR "%s: Inconsistent Rx "
1515 "descriptor chain.\n",
1519 rp->rx_skbuff[entry] = NULL;
1520 skb_put(skb, pkt_len);
1521 pci_unmap_single(rp->pdev,
1522 rp->rx_skbuff_dma[entry],
1524 PCI_DMA_FROMDEVICE);
1526 skb->protocol = eth_type_trans(skb, dev);
1527 #ifdef CONFIG_VIA_RHINE_NAPI
1528 netif_receive_skb(skb);
1532 dev->last_rx = jiffies;
1533 rp->stats.rx_bytes += pkt_len;
1534 rp->stats.rx_packets++;
1536 entry = (++rp->cur_rx) % RX_RING_SIZE;
1537 rp->rx_head_desc = &rp->rx_ring[entry];
1540 /* Refill the Rx ring buffers. */
1541 for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
1542 struct sk_buff *skb;
1543 entry = rp->dirty_rx % RX_RING_SIZE;
1544 if (rp->rx_skbuff[entry] == NULL) {
1545 skb = dev_alloc_skb(rp->rx_buf_sz);
1546 rp->rx_skbuff[entry] = skb;
1548 break; /* Better luck next round. */
1549 skb->dev = dev; /* Mark as being used by this device. */
1550 rp->rx_skbuff_dma[entry] =
1551 pci_map_single(rp->pdev, skb->data,
1553 PCI_DMA_FROMDEVICE);
1554 rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
1556 rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
1563 * Clears the "tally counters" for CRC errors and missed frames(?).
1564 * It has been reported that some chips need a write of 0 to clear
1565 * these, for others the counters are set to 1 when written to and
1566 * instead cleared when read. So we clear them both ways ...
1568 static inline void clear_tally_counters(void __iomem *ioaddr)
1570 iowrite32(0, ioaddr + RxMissed);
1571 ioread16(ioaddr + RxCRCErrs);
1572 ioread16(ioaddr + RxMissed);
1575 static void rhine_restart_tx(struct net_device *dev) {
1576 struct rhine_private *rp = netdev_priv(dev);
1577 void __iomem *ioaddr = rp->base;
1578 int entry = rp->dirty_tx % TX_RING_SIZE;
1582 * If new errors occurred, we need to sort them out before doing Tx.
1583 * In that case the ISR will be back here RSN anyway.
1585 intr_status = get_intr_status(dev);
1587 if ((intr_status & IntrTxErrSummary) == 0) {
1589 /* We know better than the chip where it should continue. */
1590 iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
1591 ioaddr + TxRingPtr);
1593 iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
1595 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1600 /* This should never happen */
1602 printk(KERN_WARNING "%s: rhine_restart_tx() "
1603 "Another error occurred %8.8x.\n",
1604 dev->name, intr_status);
1609 static void rhine_error(struct net_device *dev, int intr_status)
1611 struct rhine_private *rp = netdev_priv(dev);
1612 void __iomem *ioaddr = rp->base;
1614 spin_lock(&rp->lock);
1616 if (intr_status & IntrLinkChange)
1617 rhine_check_media(dev, 0);
1618 if (intr_status & IntrStatsMax) {
1619 rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
1620 rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
1621 clear_tally_counters(ioaddr);
1623 if (intr_status & IntrTxAborted) {
1625 printk(KERN_INFO "%s: Abort %8.8x, frame dropped.\n",
1626 dev->name, intr_status);
1628 if (intr_status & IntrTxUnderrun) {
1629 if (rp->tx_thresh < 0xE0)
1630 iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
1632 printk(KERN_INFO "%s: Transmitter underrun, Tx "
1633 "threshold now %2.2x.\n",
1634 dev->name, rp->tx_thresh);
1636 if (intr_status & IntrTxDescRace) {
1638 printk(KERN_INFO "%s: Tx descriptor write-back race.\n",
1641 if ((intr_status & IntrTxError) &&
1642 (intr_status & (IntrTxAborted |
1643 IntrTxUnderrun | IntrTxDescRace)) == 0) {
1644 if (rp->tx_thresh < 0xE0) {
1645 iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
1648 printk(KERN_INFO "%s: Unspecified error. Tx "
1649 "threshold now %2.2x.\n",
1650 dev->name, rp->tx_thresh);
1652 if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
1654 rhine_restart_tx(dev);
1656 if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
1657 IntrTxError | IntrTxAborted | IntrNormalSummary |
1660 printk(KERN_ERR "%s: Something Wicked happened! "
1661 "%8.8x.\n", dev->name, intr_status);
1664 spin_unlock(&rp->lock);
1667 static struct net_device_stats *rhine_get_stats(struct net_device *dev)
1669 struct rhine_private *rp = netdev_priv(dev);
1670 void __iomem *ioaddr = rp->base;
1671 unsigned long flags;
1673 spin_lock_irqsave(&rp->lock, flags);
1674 rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
1675 rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
1676 clear_tally_counters(ioaddr);
1677 spin_unlock_irqrestore(&rp->lock, flags);
1682 static void rhine_set_rx_mode(struct net_device *dev)
1684 struct rhine_private *rp = netdev_priv(dev);
1685 void __iomem *ioaddr = rp->base;
1686 u32 mc_filter[2]; /* Multicast hash filter */
1687 u8 rx_mode; /* Note: 0x02=accept runt, 0x01=accept errs */
1689 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1691 iowrite32(0xffffffff, ioaddr + MulticastFilter0);
1692 iowrite32(0xffffffff, ioaddr + MulticastFilter1);
1693 } else if ((dev->mc_count > multicast_filter_limit)
1694 || (dev->flags & IFF_ALLMULTI)) {
1695 /* Too many to match, or accept all multicasts. */
1696 iowrite32(0xffffffff, ioaddr + MulticastFilter0);
1697 iowrite32(0xffffffff, ioaddr + MulticastFilter1);
1700 struct dev_mc_list *mclist;
1702 memset(mc_filter, 0, sizeof(mc_filter));
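		/* Hash each address into one of 64 filter bits: the top six
		   bits of the Ethernet CRC select the bit, split into a
		   32-bit word (bit_nr >> 5) and a bit position (bit_nr & 31). */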
1703 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1704 i++, mclist = mclist->next) {
1705 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
1707 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
1709 iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
1710 iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
1713 iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
1716 static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1718 struct rhine_private *rp = netdev_priv(dev);
1720 strcpy(info->driver, DRV_NAME);
1721 strcpy(info->version, DRV_VERSION);
1722 strcpy(info->bus_info, pci_name(rp->pdev));
1725 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1727 struct rhine_private *rp = netdev_priv(dev);
1730 spin_lock_irq(&rp->lock);
1731 rc = mii_ethtool_gset(&rp->mii_if, cmd);
1732 spin_unlock_irq(&rp->lock);
1737 static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1739 struct rhine_private *rp = netdev_priv(dev);
1742 spin_lock_irq(&rp->lock);
1743 rc = mii_ethtool_sset(&rp->mii_if, cmd);
1744 spin_unlock_irq(&rp->lock);
1745 rhine_set_carrier(&rp->mii_if);
1750 static int netdev_nway_reset(struct net_device *dev)
1752 struct rhine_private *rp = netdev_priv(dev);
1754 return mii_nway_restart(&rp->mii_if);
1757 static u32 netdev_get_link(struct net_device *dev)
1759 struct rhine_private *rp = netdev_priv(dev);
1761 return mii_link_ok(&rp->mii_if);
1764 static u32 netdev_get_msglevel(struct net_device *dev)
1769 static void netdev_set_msglevel(struct net_device *dev, u32 value)
1774 static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1776 struct rhine_private *rp = netdev_priv(dev);
1778 if (!(rp->quirks & rqWOL))
1781 spin_lock_irq(&rp->lock);
1782 wol->supported = WAKE_PHY | WAKE_MAGIC |
1783 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
1784 wol->wolopts = rp->wolopts;
1785 spin_unlock_irq(&rp->lock);
1788 static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1790 struct rhine_private *rp = netdev_priv(dev);
1791 u32 support = WAKE_PHY | WAKE_MAGIC |
1792 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
1794 if (!(rp->quirks & rqWOL))
1797 if (wol->wolopts & ~support)
1800 spin_lock_irq(&rp->lock);
1801 rp->wolopts = wol->wolopts;
1802 spin_unlock_irq(&rp->lock);
1807 static const struct ethtool_ops netdev_ethtool_ops = {
1808 .get_drvinfo = netdev_get_drvinfo,
1809 .get_settings = netdev_get_settings,
1810 .set_settings = netdev_set_settings,
1811 .nway_reset = netdev_nway_reset,
1812 .get_link = netdev_get_link,
1813 .get_msglevel = netdev_get_msglevel,
1814 .set_msglevel = netdev_set_msglevel,
1815 .get_wol = rhine_get_wol,
1816 .set_wol = rhine_set_wol,
1817 .get_sg = ethtool_op_get_sg,
1818 .get_tx_csum = ethtool_op_get_tx_csum,
1821 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1823 struct rhine_private *rp = netdev_priv(dev);
1826 if (!netif_running(dev))
1829 spin_lock_irq(&rp->lock);
1830 rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
1831 spin_unlock_irq(&rp->lock);
1832 rhine_set_carrier(&rp->mii_if);
1837 static int rhine_close(struct net_device *dev)
1839 struct rhine_private *rp = netdev_priv(dev);
1840 void __iomem *ioaddr = rp->base;
1842 spin_lock_irq(&rp->lock);
1844 netif_stop_queue(dev);
1845 #ifdef CONFIG_VIA_RHINE_NAPI
1846 napi_disable(&rp->napi);
1850 printk(KERN_DEBUG "%s: Shutting down ethercard, "
1851 "status was %4.4x.\n",
1852 dev->name, ioread16(ioaddr + ChipCmd));
1854 /* Switch to loopback mode to avoid hardware races. */
1855 iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
1857 /* Disable interrupts by clearing the interrupt mask. */
1858 iowrite16(0x0000, ioaddr + IntrEnable);
1860 /* Stop the chip's Tx and Rx processes. */
1861 iowrite16(CmdStop, ioaddr + ChipCmd);
1863 spin_unlock_irq(&rp->lock);
1865 free_irq(rp->pdev->irq, dev);
1874 static void __devexit rhine_remove_one(struct pci_dev *pdev)
1876 struct net_device *dev = pci_get_drvdata(pdev);
1877 struct rhine_private *rp = netdev_priv(dev);
1879 unregister_netdev(dev);
1881 pci_iounmap(pdev, rp->base);
1882 pci_release_regions(pdev);
1885 pci_disable_device(pdev);
1886 pci_set_drvdata(pdev, NULL);
1889 static void rhine_shutdown (struct pci_dev *pdev)
1891 struct net_device *dev = pci_get_drvdata(pdev);
1892 struct rhine_private *rp = netdev_priv(dev);
1893 void __iomem *ioaddr = rp->base;
1895 if (!(rp->quirks & rqWOL))
1896 return; /* Nothing to do for non-WOL adapters */
1898 rhine_power_init(dev);
1900 /* Make sure we use pattern 0, 1 and not 4, 5 */
1901 if (rp->quirks & rq6patterns)
1902 iowrite8(0x04, ioaddr + 0xA7);
1904 if (rp->wolopts & WAKE_MAGIC) {
1905 iowrite8(WOLmagic, ioaddr + WOLcrSet);
1907 * Turn EEPROM-controlled wake-up back on -- some hardware may
1908 * not cooperate otherwise.
1910 iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
1913 if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
1914 iowrite8(WOLbmcast, ioaddr + WOLcgSet);
1916 if (rp->wolopts & WAKE_PHY)
1917 iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
1919 if (rp->wolopts & WAKE_UCAST)
1920 iowrite8(WOLucast, ioaddr + WOLcrSet);
1923 /* Enable legacy WOL (for old motherboards) */
1924 iowrite8(0x01, ioaddr + PwcfgSet);
1925 iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
1928 /* Hit power state D3 (sleep) */
1930 iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
1932 /* TODO: Check use of pci_enable_wake() */
1937 static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
1939 struct net_device *dev = pci_get_drvdata(pdev);
1940 struct rhine_private *rp = netdev_priv(dev);
1941 unsigned long flags;
1943 if (!netif_running(dev))
1946 #ifdef CONFIG_VIA_RHINE_NAPI
1947 napi_disable(&rp->napi);
1949 netif_device_detach(dev);
1950 pci_save_state(pdev);
1952 spin_lock_irqsave(&rp->lock, flags);
1953 rhine_shutdown(pdev);
1954 spin_unlock_irqrestore(&rp->lock, flags);
1956 free_irq(dev->irq, dev);
1960 static int rhine_resume(struct pci_dev *pdev)
1962 struct net_device *dev = pci_get_drvdata(pdev);
1963 struct rhine_private *rp = netdev_priv(dev);
1964 unsigned long flags;
1967 if (!netif_running(dev))
1970 if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
1971 printk(KERN_ERR "via-rhine %s: request_irq failed\n", dev->name);
1973 ret = pci_set_power_state(pdev, PCI_D0);
1975 printk(KERN_INFO "%s: Entering power state D0 %s (%d).\n",
1976 dev->name, ret ? "failed" : "succeeded", ret);
1978 pci_restore_state(pdev);
1980 spin_lock_irqsave(&rp->lock, flags);
1982 enable_mmio(rp->pioaddr, rp->quirks);
1984 rhine_power_init(dev);
1989 init_registers(dev);
1990 spin_unlock_irqrestore(&rp->lock, flags);
1992 netif_device_attach(dev);
1996 #endif /* CONFIG_PM */
1998 static struct pci_driver rhine_driver = {
2000 .id_table = rhine_pci_tbl,
2001 .probe = rhine_init_one,
2002 .remove = __devexit_p(rhine_remove_one),
2004 .suspend = rhine_suspend,
2005 .resume = rhine_resume,
2006 #endif /* CONFIG_PM */
2007 .shutdown = rhine_shutdown,
2010 static struct dmi_system_id __initdata rhine_dmi_table[] = {
2014 DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
2015 DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2021 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
2022 DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2028 static int __init rhine_init(void)
2030 /* when a module, this is printed whether or not devices are found in probe */
2034 if (dmi_check_system(rhine_dmi_table)) {
2035 /* these BIOSes fail at PXE boot if chip is in D3 */
2037 printk(KERN_WARNING "%s: Broken BIOS detected, avoid_D3 "
2042 printk(KERN_INFO "%s: avoid_D3 set.\n", DRV_NAME);
2044 return pci_register_driver(&rhine_driver);
2048 static void __exit rhine_cleanup(void)
2050 pci_unregister_driver(&rhine_driver);
2054 module_init(rhine_init);
2055 module_exit(rhine_cleanup);