1 /* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
3 Written 1998-2001 by Donald Becker.
5 Current Maintainer: Roger Luethi <rl@hellgate.ch>
7 This software may be used and distributed according to the terms of
8 the GNU General Public License (GPL), incorporated herein by reference.
9 Drivers based on or derived from this code fall under the GPL and must
10 retain the authorship, copyright and license notice. This file is not
11 a complete program and may only be used when the entire operating
12 system is licensed under the GPL.
14 This driver is designed for the VIA VT86C100A Rhine-I.
15 It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
16 and management NIC 6105M).
18 The author may be reached as becker@scyld.com, or C/O
19 Scyld Computing Corporation
20 410 Severn Ave., Suite 210
24 This driver contains some changes from the original Donald Becker
25 version. He may or may not be interested in bug reports on this
26 code. You can find his versions at:
27 http://www.scyld.com/network/via-rhine.html
28 [link no longer provides useful info -jgarzik]
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34 #define DRV_NAME "via-rhine"
35 #define DRV_VERSION "1.5.0"
36 #define DRV_RELDATE "2010-10-09"
38 #include <linux/types.h>
40 /* A few user-configurable values.
41 These may be modified when a driver module is loaded. */
43 #define RHINE_MSG_DEFAULT \
46 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
47 Setting to > 1518 effectively disables this feature. */
48 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
49 defined(CONFIG_SPARC) || defined(__ia64__) || \
50 defined(__sh__) || defined(__mips__)
51 static int rx_copybreak = 1518;
53 static int rx_copybreak;
56 /* Work-around for broken BIOSes: they are unable to get the chip back out of
57 power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
61 * In case you are looking for 'options[]' or 'full_duplex[]', they
62 * are gone. Use ethtool(8) instead.
65 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
66 The Rhine has a 64 element 8390-like hash table. */
67 static const int multicast_filter_limit = 32;
70 /* Operational parameters that are set at compile time. */
72 /* Keep the ring sizes a power of two for compile efficiency.
73 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
74 Making the Tx ring too large decreases the effectiveness of channel
75 bonding and packet priority.
76 There are no ill effects from too-large receive rings. */
77 #define TX_RING_SIZE 16
78 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
79 #define RX_RING_SIZE 64
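/*
 * Illustrative sketch (hypothetical helpers, not part of the original
 * driver): because the ring sizes above are powers of two, the free-running
 * indices used throughout this file stay cheap. The compiler turns the '%'
 * into a bit mask, and the in-flight count is a plain subtraction that
 * remains correct even after the unsigned counters wrap.
 */
static inline unsigned int tx_ring_slot(unsigned int cur_tx)
{
	return cur_tx % TX_RING_SIZE;	/* == cur_tx & (TX_RING_SIZE - 1) */
}

static inline unsigned int tx_in_flight(unsigned int cur_tx, unsigned int dirty_tx)
{
	return cur_tx - dirty_tx;	/* valid across counter wrap-around */
}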
81 /* Operational parameters that usually are not changed. */
83 /* Time in jiffies before concluding the transmitter is hung. */
84 #define TX_TIMEOUT (2*HZ)
86 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
88 #include <linux/module.h>
89 #include <linux/moduleparam.h>
90 #include <linux/kernel.h>
91 #include <linux/string.h>
92 #include <linux/timer.h>
93 #include <linux/errno.h>
94 #include <linux/ioport.h>
95 #include <linux/interrupt.h>
96 #include <linux/pci.h>
97 #include <linux/dma-mapping.h>
98 #include <linux/netdevice.h>
99 #include <linux/etherdevice.h>
100 #include <linux/skbuff.h>
101 #include <linux/init.h>
102 #include <linux/delay.h>
103 #include <linux/mii.h>
104 #include <linux/ethtool.h>
105 #include <linux/crc32.h>
106 #include <linux/if_vlan.h>
107 #include <linux/bitops.h>
108 #include <linux/workqueue.h>
109 #include <asm/processor.h> /* Processor type for cache alignment. */
112 #include <asm/uaccess.h>
113 #include <linux/dmi.h>
115 /* These identify the driver base version and may not be removed. */
116 static const char version[] =
117 "v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
119 /* This driver was written to use PCI memory space. Some early versions
120 of the Rhine may only work correctly with I/O space accesses. */
121 #ifdef CONFIG_VIA_RHINE_MMIO
126 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
127 MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
128 MODULE_LICENSE("GPL");
130 module_param(debug, int, 0);
131 module_param(rx_copybreak, int, 0);
132 module_param(avoid_D3, bool, 0);
133 MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
134 MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
135 MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
143 I. Board Compatibility
145 This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
148 II. Board-specific settings
150 Boards with this chip are functional only in a bus-master PCI slot.
152 Many operational settings are loaded from the EEPROM to the Config word at
153 offset 0x78. For most of these settings, this driver assumes that they are correct.
155 If this driver is compiled to use PCI memory space operations the EEPROM
156 must be configured to enable memory ops.
158 III. Driver operation
162 This driver uses two statically allocated fixed-size descriptor lists
163 formed into rings by a branch from the final descriptor to the beginning of
164 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
166 IIIb/c. Transmit/Receive Structure
168 This driver attempts to use a zero-copy receive and transmit scheme.
170 Alas, all data buffers are required to start on a 32 bit boundary, so
171 the driver must often copy transmit packets into bounce buffers.
173 The driver allocates full frame size skbuffs for the Rx ring buffers at
174 open() time and passes the skb->data field to the chip as receive data
175 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
176 a fresh skbuff is allocated and the frame is copied to the new skbuff.
177 When the incoming frame is larger, the skbuff is passed directly up the
178 protocol stack. Buffers consumed this way are replaced by newly allocated
179 skbuffs in the last phase of rhine_rx().
181 The RX_COPYBREAK value is chosen to trade off the memory wasted by
182 using a full-sized skbuff for small frames vs. the copying costs of larger
183 frames. New boards are typically used in generously configured machines
184 and the underfilled buffers have negligible impact compared to the benefit of
185 a single allocation size, so the default value of zero results in never
186 copying packets. When copying is done, the cost is usually mitigated by using
187 a combined copy/checksum routine. Copying also preloads the cache, which is
188 most useful with small frames.
190 Since the VIA chips are only able to transfer data to buffers on 32 bit
191 boundaries, the IP header at offset 14 in an ethernet frame isn't
192 longword aligned for further processing. Copying these unaligned buffers
193 has the beneficial effect of 16-byte aligning the IP header.
195 IIId. Synchronization
197 The driver runs as two independent, single-threaded flows of control. One
198 is the send-packet routine, which enforces single-threaded use by the
199 netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
200 which is single threaded by the hardware and interrupt handling software.
202 The send packet thread has partial control over the Tx ring. It locks the
203 netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
204 the ring is not available it stops the transmit queue by
205 calling netif_stop_queue.
207 The interrupt handler has exclusive control over the Rx ring and records stats
208 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
209 empty by incrementing the dirty_tx mark. If at least half of the entries in
210 the Tx ring are available, the transmit queue is woken up if it was stopped.
216 Preliminary VT86C100A manual from http://www.via.com.tw/
217 http://www.scyld.com/expert/100mbps.html
218 http://www.scyld.com/expert/NWay.html
219 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
220 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
225 The VT86C100A manual is not a reliable source of information.
226 The 3043 chip does not handle unaligned transmit or receive buffers, resulting
227 in significant performance degradation for bounce buffer copies on transmit
228 and unaligned IP headers on receive.
229 The chip does not pad to minimum transmit length.
234 /* This table drives the PCI probe routines. It's mostly boilerplate in all
235 of the drivers, and will likely be provided by some future kernel.
236 Note the matching code -- the first table entry matches all 56** cards but
237 the second only the 1234 card.
244 VT8231 = 0x50, /* Integrated MAC */
245 VT8233 = 0x60, /* Integrated MAC */
246 VT8235 = 0x74, /* Integrated MAC */
247 VT8237 = 0x78, /* Integrated MAC */
254 VT6105M = 0x90, /* Management adapter */
258 rqWOL = 0x0001, /* Wake-On-LAN support */
259 rqForceReset = 0x0002,
260 rq6patterns = 0x0040, /* 6 instead of 4 patterns for WOL */
261 rqStatusWBRace = 0x0080, /* Tx Status Writeback Error possible */
262 rqRhineI = 0x0100, /* See comment below */
265 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
266 * MMIO as well as for the collision counter and the Tx FIFO underflow
267 * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
270 /* Beware of PCI posted writes */
271 #define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)
273 static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
274 { 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, }, /* VT86C100A */
275 { 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6102 */
276 { 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, }, /* 6105{,L,LOM} */
277 { 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6105M */
278 { } /* terminate list */
280 MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
283 /* Offsets to the device registers. */
284 enum register_offsets {
285 StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
286 ChipCmd1=0x09, TQWake=0x0A,
287 IntrStatus=0x0C, IntrEnable=0x0E,
288 MulticastFilter0=0x10, MulticastFilter1=0x14,
289 RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
290 MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
291 MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
292 ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
293 RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
294 StickyHW=0x83, IntrStatus2=0x84,
295 CamMask=0x88, CamCon=0x92, CamAddr=0x93,
296 WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
297 WOLcrClr1=0xA6, WOLcgClr=0xA7,
298 PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
301 /* Bits in ConfigD */
303 BackOptional=0x01, BackModify=0x02,
304 BackCaptureEffect=0x04, BackRandom=0x08
307 /* Bits in the TxConfig (TCR) register */
310 TCR_LB0=0x02, /* loopback[0] */
311 TCR_LB1=0x04, /* loopback[1] */
319 /* Bits in the CamCon (CAMC) register */
327 /* Bits in the PCIBusConfig1 (BCR1) register */
335 BCR1_TXQNOBK=0x40, /* for VT6105 */
336 BCR1_VIDFR=0x80, /* for VT6105 */
337 BCR1_MED0=0x40, /* for VT6102 */
338 BCR1_MED1=0x80, /* for VT6102 */
342 /* Registers we check to verify that MMIO and PIO read back the same values. */
343 static const int mmio_verify_registers[] = {
344 RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
349 /* Bits in the interrupt status/mask registers. */
350 enum intr_status_bits {
354 IntrTxError = 0x0008,
355 IntrRxEmpty = 0x0020,
357 IntrStatsMax = 0x0080,
358 IntrRxEarly = 0x0100,
359 IntrTxUnderrun = 0x0210,
360 IntrRxOverflow = 0x0400,
361 IntrRxDropped = 0x0800,
362 IntrRxNoBuf = 0x1000,
363 IntrTxAborted = 0x2000,
364 IntrLinkChange = 0x4000,
365 IntrRxWakeUp = 0x8000,
366 IntrTxDescRace = 0x080000, /* mapped from IntrStatus2 */
367 IntrNormalSummary = IntrRxDone | IntrTxDone,
368 IntrTxErrSummary = IntrTxDescRace | IntrTxAborted | IntrTxError |
372 /* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
381 /* The Rx and Tx buffer descriptors. */
384 __le32 desc_length; /* Chain flag, Buffer/frame length */
390 __le32 desc_length; /* Chain flag, Tx Config, Frame length */
395 /* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
396 #define TXDESC 0x00e08000
398 enum rx_status_bits {
399 RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
402 /* Bits in *_desc.*_status */
403 enum desc_status_bits {
407 /* Bits in *_desc.*_length */
408 enum desc_length_bits {
412 /* Bits in ChipCmd. */
414 CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
415 CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
416 Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
417 Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
423 struct u64_stats_sync syncp;
426 struct rhine_private {
427 /* Bit mask for configured VLAN ids */
428 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
430 /* Descriptor rings */
431 struct rx_desc *rx_ring;
432 struct tx_desc *tx_ring;
433 dma_addr_t rx_ring_dma;
434 dma_addr_t tx_ring_dma;
436 /* The addresses of receive-in-place skbuffs. */
437 struct sk_buff *rx_skbuff[RX_RING_SIZE];
438 dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
440 /* The saved address of a sent-in-place packet/buffer, for later free(). */
441 struct sk_buff *tx_skbuff[TX_RING_SIZE];
442 dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
444 /* Tx bounce buffers (Rhine-I only) */
445 unsigned char *tx_buf[TX_RING_SIZE];
446 unsigned char *tx_bufs;
447 dma_addr_t tx_bufs_dma;
449 struct pci_dev *pdev;
451 struct net_device *dev;
452 struct napi_struct napi;
454 struct mutex task_lock;
456 struct work_struct slow_event_task;
457 struct work_struct reset_task;
461 /* Frequently used values: keep some adjacent for cache effect. */
463 struct rx_desc *rx_head_desc;
464 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
465 unsigned int cur_tx, dirty_tx;
466 unsigned int rx_buf_sz; /* Based on MTU+slack. */
467 struct rhine_stats rx_stats;
468 struct rhine_stats tx_stats;
471 u8 tx_thresh, rx_thresh;
473 struct mii_if_info mii_if;
477 #define BYTE_REG_BITS_ON(x, p) do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
478 #define WORD_REG_BITS_ON(x, p) do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
479 #define DWORD_REG_BITS_ON(x, p) do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
481 #define BYTE_REG_BITS_IS_ON(x, p) (ioread8((p)) & (x))
482 #define WORD_REG_BITS_IS_ON(x, p) (ioread16((p)) & (x))
483 #define DWORD_REG_BITS_IS_ON(x, p) (ioread32((p)) & (x))
485 #define BYTE_REG_BITS_OFF(x, p) do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
486 #define WORD_REG_BITS_OFF(x, p) do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
487 #define DWORD_REG_BITS_OFF(x, p) do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
489 #define BYTE_REG_BITS_SET(x, m, p) do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
490 #define WORD_REG_BITS_SET(x, m, p) do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
491 #define DWORD_REG_BITS_SET(x, m, p) do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
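/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * driver): the macros above implement read-modify-write access to single
 * register fields -- the driver itself uses the same pattern later, e.g.
 * BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig) when kicking
 * the Tx threshold. A minimal usage example:
 */
static inline void rhine_example_loopback(void __iomem *ioaddr, bool on)
{
	if (on)
		BYTE_REG_BITS_ON(TCR_LB0, ioaddr + TxConfig);	/* set loopback[0] */
	else
		BYTE_REG_BITS_OFF(TCR_LB0, ioaddr + TxConfig);	/* clear loopback[0] */
}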
494 static int mdio_read(struct net_device *dev, int phy_id, int location);
495 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
496 static int rhine_open(struct net_device *dev);
497 static void rhine_reset_task(struct work_struct *work);
498 static void rhine_slow_event_task(struct work_struct *work);
499 static void rhine_tx_timeout(struct net_device *dev);
500 static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
501 struct net_device *dev);
502 static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
503 static void rhine_tx(struct net_device *dev);
504 static int rhine_rx(struct net_device *dev, int limit);
505 static void rhine_set_rx_mode(struct net_device *dev);
506 static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev,
507 struct rtnl_link_stats64 *stats);
508 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
509 static const struct ethtool_ops netdev_ethtool_ops;
510 static int rhine_close(struct net_device *dev);
511 static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
512 static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
513 static void rhine_restart_tx(struct net_device *dev);
515 static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
517 void __iomem *ioaddr = rp->base;
520 for (i = 0; i < 1024; i++) {
521 bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);
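/* XOR: true once the bit reaches the desired level (low if waiting for
 * low, high if waiting for high). */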
523 if (low ^ has_mask_bits)
528 netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
529 "count: %04d\n", low ? "low" : "high", reg, mask, i);
533 static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
535 rhine_wait_bit(rp, reg, mask, false);
538 static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
540 rhine_wait_bit(rp, reg, mask, true);
543 static u32 rhine_get_events(struct rhine_private *rp)
545 void __iomem *ioaddr = rp->base;
548 intr_status = ioread16(ioaddr + IntrStatus);
549 /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
550 if (rp->quirks & rqStatusWBRace)
551 intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
555 static void rhine_ack_events(struct rhine_private *rp, u32 mask)
557 void __iomem *ioaddr = rp->base;
559 if (rp->quirks & rqStatusWBRace)
560 iowrite8(mask >> 16, ioaddr + IntrStatus2);
561 iowrite16(mask, ioaddr + IntrStatus);
566 * Get power-related registers into a sane state.
567 * Notify user about past WOL event.
569 static void rhine_power_init(struct net_device *dev)
571 struct rhine_private *rp = netdev_priv(dev);
572 void __iomem *ioaddr = rp->base;
575 if (rp->quirks & rqWOL) {
576 /* Make sure chip is in power state D0 */
577 iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
579 /* Disable "force PME-enable" */
580 iowrite8(0x80, ioaddr + WOLcgClr);
582 /* Clear power-event config bits (WOL) */
583 iowrite8(0xFF, ioaddr + WOLcrClr);
584 /* More recent cards can manage two additional patterns */
585 if (rp->quirks & rq6patterns)
586 iowrite8(0x03, ioaddr + WOLcrClr1);
588 /* Save power-event status bits */
589 wolstat = ioread8(ioaddr + PwrcsrSet);
590 if (rp->quirks & rq6patterns)
591 wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
593 /* Clear power-event status bits */
594 iowrite8(0xFF, ioaddr + PwrcsrClr);
595 if (rp->quirks & rq6patterns)
596 iowrite8(0x03, ioaddr + PwrcsrClr1);
602 reason = "Magic packet";
605 reason = "Link went up";
608 reason = "Link went down";
611 reason = "Unicast packet";
614 reason = "Multicast/broadcast packet";
619 netdev_info(dev, "Woke system up. Reason: %s\n",
625 static void rhine_chip_reset(struct net_device *dev)
627 struct rhine_private *rp = netdev_priv(dev);
628 void __iomem *ioaddr = rp->base;
631 iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
634 if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
635 netdev_info(dev, "Reset not complete yet. Trying harder.\n");
638 if (rp->quirks & rqForceReset)
639 iowrite8(0x40, ioaddr + MiscCmd);
641 /* Reset can take somewhat longer (rare) */
642 rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
645 cmd1 = ioread8(ioaddr + ChipCmd1);
646 netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
647 "failed" : "succeeded");
651 static void enable_mmio(long pioaddr, u32 quirks)
654 if (quirks & rqRhineI) {
655 /* More recent docs say that this bit is reserved ... */
656 n = inb(pioaddr + ConfigA) | 0x20;
657 outb(n, pioaddr + ConfigA);
659 n = inb(pioaddr + ConfigD) | 0x80;
660 outb(n, pioaddr + ConfigD);
666 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
667 * (plus 0x6C for Rhine-I/II)
669 static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
671 struct rhine_private *rp = netdev_priv(dev);
672 void __iomem *ioaddr = rp->base;
675 outb(0x20, pioaddr + MACRegEEcsr);
676 for (i = 0; i < 1024; i++) {
677 if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
681 pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
685 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
686 * MMIO. If reloading EEPROM was done first this could be avoided, but
687 * it is not known if that still works with the "win98-reboot" problem.
689 enable_mmio(pioaddr, rp->quirks);
692 /* Turn off EEPROM-controlled wake-up (magic packet) */
693 if (rp->quirks & rqWOL)
694 iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
698 #ifdef CONFIG_NET_POLL_CONTROLLER
699 static void rhine_poll(struct net_device *dev)
701 struct rhine_private *rp = netdev_priv(dev);
702 const int irq = rp->pdev->irq;
705 rhine_interrupt(irq, dev);
710 static void rhine_kick_tx_threshold(struct rhine_private *rp)
712 if (rp->tx_thresh < 0xe0) {
713 void __iomem *ioaddr = rp->base;
715 rp->tx_thresh += 0x20;
716 BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
720 static void rhine_tx_err(struct rhine_private *rp, u32 status)
722 struct net_device *dev = rp->dev;
724 if (status & IntrTxAborted) {
725 netif_info(rp, tx_err, dev,
726 "Abort %08x, frame dropped\n", status);
729 if (status & IntrTxUnderrun) {
730 rhine_kick_tx_threshold(rp);
731 netif_info(rp, tx_err, dev, "Transmitter underrun, "
732 "Tx threshold now %02x\n", rp->tx_thresh);
735 if (status & IntrTxDescRace)
736 netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");
738 if ((status & IntrTxError) &&
739 (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
740 rhine_kick_tx_threshold(rp);
741 netif_info(rp, tx_err, dev, "Unspecified error. "
742 "Tx threshold now %02x\n", rp->tx_thresh);
745 rhine_restart_tx(dev);
748 static void rhine_update_rx_crc_and_missed_errors(struct rhine_private *rp)
750 void __iomem *ioaddr = rp->base;
751 struct net_device_stats *stats = &rp->dev->stats;
753 stats->rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
754 stats->rx_missed_errors += ioread16(ioaddr + RxMissed);
757 * Clears the "tally counters" for CRC errors and missed frames(?).
758 * It has been reported that some chips need a write of 0 to clear
759 * these, for others the counters are set to 1 when written to and
760 * instead cleared when read. So we clear them both ways ...
762 iowrite32(0, ioaddr + RxMissed);
763 ioread16(ioaddr + RxCRCErrs);
764 ioread16(ioaddr + RxMissed);
767 #define RHINE_EVENT_NAPI_RX (IntrRxDone | \
775 #define RHINE_EVENT_NAPI_TX_ERR (IntrTxError | \
779 #define RHINE_EVENT_NAPI_TX (IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)
781 #define RHINE_EVENT_NAPI (RHINE_EVENT_NAPI_RX | \
782 RHINE_EVENT_NAPI_TX | \
784 #define RHINE_EVENT_SLOW (IntrPCIErr | IntrLinkChange)
785 #define RHINE_EVENT (RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)
787 static int rhine_napipoll(struct napi_struct *napi, int budget)
789 struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
790 struct net_device *dev = rp->dev;
791 void __iomem *ioaddr = rp->base;
792 u16 enable_mask = RHINE_EVENT & 0xffff;
796 status = rhine_get_events(rp);
797 rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);
799 if (status & RHINE_EVENT_NAPI_RX)
800 work_done += rhine_rx(dev, budget);
802 if (status & RHINE_EVENT_NAPI_TX) {
803 if (status & RHINE_EVENT_NAPI_TX_ERR) {
804 /* Avoid scavenging before the Tx engine is turned off */
805 rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
806 if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
807 netif_warn(rp, tx_err, dev, "Tx still on\n");
812 if (status & RHINE_EVENT_NAPI_TX_ERR)
813 rhine_tx_err(rp, status);
816 if (status & IntrStatsMax) {
817 spin_lock(&rp->lock);
818 rhine_update_rx_crc_and_missed_errors(rp);
819 spin_unlock(&rp->lock);
822 if (status & RHINE_EVENT_SLOW) {
823 enable_mask &= ~RHINE_EVENT_SLOW;
824 schedule_work(&rp->slow_event_task);
827 if (work_done < budget) {
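/* NAPI contract: re-arm the chip's interrupt sources only when the poll
 * finished under budget. */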
829 iowrite16(enable_mask, ioaddr + IntrEnable);
835 static void rhine_hw_init(struct net_device *dev, long pioaddr)
837 struct rhine_private *rp = netdev_priv(dev);
839 /* Reset the chip to erase previous misconfiguration. */
840 rhine_chip_reset(dev);
842 /* Rhine-I needs extra time to recuperate before EEPROM reload */
843 if (rp->quirks & rqRhineI)
846 /* Reload EEPROM controlled bytes cleared by soft reset */
847 rhine_reload_eeprom(pioaddr, dev);
850 static const struct net_device_ops rhine_netdev_ops = {
851 .ndo_open = rhine_open,
852 .ndo_stop = rhine_close,
853 .ndo_start_xmit = rhine_start_tx,
854 .ndo_get_stats64 = rhine_get_stats64,
855 .ndo_set_rx_mode = rhine_set_rx_mode,
856 .ndo_change_mtu = eth_change_mtu,
857 .ndo_validate_addr = eth_validate_addr,
858 .ndo_set_mac_address = eth_mac_addr,
859 .ndo_do_ioctl = netdev_ioctl,
860 .ndo_tx_timeout = rhine_tx_timeout,
861 .ndo_vlan_rx_add_vid = rhine_vlan_rx_add_vid,
862 .ndo_vlan_rx_kill_vid = rhine_vlan_rx_kill_vid,
863 #ifdef CONFIG_NET_POLL_CONTROLLER
864 .ndo_poll_controller = rhine_poll,
868 static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
870 struct net_device *dev;
871 struct rhine_private *rp;
876 void __iomem *ioaddr;
885 /* when built into the kernel, we only print version if device is found */
887 pr_info_once("%s\n", version);
894 if (pdev->revision < VTunknown0) {
898 else if (pdev->revision >= VT6102) {
899 quirks = rqWOL | rqForceReset;
900 if (pdev->revision < VT6105) {
902 quirks |= rqStatusWBRace; /* Rhine-II exclusive */
905 phy_id = 1; /* Integrated PHY, phy_id fixed to 1 */
906 if (pdev->revision >= VT6105_B0)
907 quirks |= rq6patterns;
908 if (pdev->revision < VT6105M)
911 name = "Rhine III (Management Adapter)";
915 rc = pci_enable_device(pdev);
919 /* this should always be supported */
920 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
923 "32-bit PCI DMA addresses not supported by the card!?\n");
928 if ((pci_resource_len(pdev, 0) < io_size) ||
929 (pci_resource_len(pdev, 1) < io_size)) {
931 dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
935 pioaddr = pci_resource_start(pdev, 0);
936 memaddr = pci_resource_start(pdev, 1);
938 pci_set_master(pdev);
940 dev = alloc_etherdev(sizeof(struct rhine_private));
945 SET_NETDEV_DEV(dev, &pdev->dev);
947 rp = netdev_priv(dev);
950 rp->pioaddr = pioaddr;
952 rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
954 rc = pci_request_regions(pdev, DRV_NAME);
956 goto err_out_free_netdev;
958 ioaddr = pci_iomap(pdev, bar, io_size);
962 "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
963 pci_name(pdev), io_size, memaddr);
964 goto err_out_free_res;
968 enable_mmio(pioaddr, quirks);
970 /* Check that selected MMIO registers match the PIO ones */
972 while (mmio_verify_registers[i]) {
973 int reg = mmio_verify_registers[i++];
974 unsigned char a = inb(pioaddr+reg);
975 unsigned char b = readb(ioaddr+reg);
979 "MMIO does not match PIO [%02x] (%02x != %02x)\n",
984 #endif /* USE_MMIO */
988 /* Get chip registers into a sane state */
989 rhine_power_init(dev);
990 rhine_hw_init(dev, pioaddr);
992 for (i = 0; i < 6; i++)
993 dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
995 if (!is_valid_ether_addr(dev->dev_addr)) {
996 /* Report it and use a random ethernet address instead */
997 netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
998 eth_hw_addr_random(dev);
999 netdev_info(dev, "Using random MAC address: %pM\n",
1003 /* For Rhine-I/II, phy_id is loaded from EEPROM */
1005 phy_id = ioread8(ioaddr + 0x6C);
1007 spin_lock_init(&rp->lock);
1008 mutex_init(&rp->task_lock);
1009 INIT_WORK(&rp->reset_task, rhine_reset_task);
1010 INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);
1012 rp->mii_if.dev = dev;
1013 rp->mii_if.mdio_read = mdio_read;
1014 rp->mii_if.mdio_write = mdio_write;
1015 rp->mii_if.phy_id_mask = 0x1f;
1016 rp->mii_if.reg_num_mask = 0x1f;
1018 /* The chip-specific entries in the device structure. */
1019 dev->netdev_ops = &rhine_netdev_ops;
1020 dev->ethtool_ops = &netdev_ethtool_ops;
1021 dev->watchdog_timeo = TX_TIMEOUT;
1023 netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
1025 if (rp->quirks & rqRhineI)
1026 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
1028 if (pdev->revision >= VT6105M)
1029 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
1030 NETIF_F_HW_VLAN_FILTER;
1032 /* dev->name not defined before register_netdev()! */
1033 rc = register_netdev(dev);
1037 netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
1044 dev->dev_addr, pdev->irq);
1046 pci_set_drvdata(pdev, dev);
1050 int mii_status = mdio_read(dev, phy_id, 1);
1051 mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
1052 mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
1053 if (mii_status != 0xffff && mii_status != 0x0000) {
1054 rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
1056 "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
1058 mii_status, rp->mii_if.advertising,
1059 mdio_read(dev, phy_id, 5));
1061 /* set IFF_RUNNING */
1062 if (mii_status & BMSR_LSTATUS)
1063 netif_carrier_on(dev);
1065 netif_carrier_off(dev);
1069 rp->mii_if.phy_id = phy_id;
1071 netif_info(rp, probe, dev, "No D3 power state at shutdown\n");
1076 pci_iounmap(pdev, ioaddr);
1078 pci_release_regions(pdev);
1079 err_out_free_netdev:
1085 static int alloc_ring(struct net_device* dev)
1087 struct rhine_private *rp = netdev_priv(dev);
1089 dma_addr_t ring_dma;
1091 ring = pci_alloc_consistent(rp->pdev,
1092 RX_RING_SIZE * sizeof(struct rx_desc) +
1093 TX_RING_SIZE * sizeof(struct tx_desc),
1096 netdev_err(dev, "Could not allocate DMA memory\n");
1099 if (rp->quirks & rqRhineI) {
1100 rp->tx_bufs = pci_alloc_consistent(rp->pdev,
1101 PKT_BUF_SZ * TX_RING_SIZE,
1103 if (rp->tx_bufs == NULL) {
1104 pci_free_consistent(rp->pdev,
1105 RX_RING_SIZE * sizeof(struct rx_desc) +
1106 TX_RING_SIZE * sizeof(struct tx_desc),
1113 rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
1114 rp->rx_ring_dma = ring_dma;
1115 rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
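/* Rx and Tx descriptors share one DMA allocation; the Tx ring starts
 * immediately after the last Rx descriptor. */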
1120 static void free_ring(struct net_device* dev)
1122 struct rhine_private *rp = netdev_priv(dev);
1124 pci_free_consistent(rp->pdev,
1125 RX_RING_SIZE * sizeof(struct rx_desc) +
1126 TX_RING_SIZE * sizeof(struct tx_desc),
1127 rp->rx_ring, rp->rx_ring_dma);
1131 pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
1132 rp->tx_bufs, rp->tx_bufs_dma);
1138 static void alloc_rbufs(struct net_device *dev)
1140 struct rhine_private *rp = netdev_priv(dev);
1144 rp->dirty_rx = rp->cur_rx = 0;
1146 rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1147 rp->rx_head_desc = &rp->rx_ring[0];
1148 next = rp->rx_ring_dma;
1150 /* Init the ring entries */
1151 for (i = 0; i < RX_RING_SIZE; i++) {
1152 rp->rx_ring[i].rx_status = 0;
1153 rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
1154 next += sizeof(struct rx_desc);
1155 rp->rx_ring[i].next_desc = cpu_to_le32(next);
1156 rp->rx_skbuff[i] = NULL;
1158 /* Mark the last entry as wrapping the ring. */
1159 rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
1161 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1162 for (i = 0; i < RX_RING_SIZE; i++) {
1163 struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
1164 rp->rx_skbuff[i] = skb;
1168 rp->rx_skbuff_dma[i] =
1169 pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
1170 PCI_DMA_FROMDEVICE);
1172 rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
1173 rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
1175 rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1178 static void free_rbufs(struct net_device* dev)
1180 struct rhine_private *rp = netdev_priv(dev);
1183 /* Free all the skbuffs in the Rx queue. */
1184 for (i = 0; i < RX_RING_SIZE; i++) {
1185 rp->rx_ring[i].rx_status = 0;
1186 rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1187 if (rp->rx_skbuff[i]) {
1188 pci_unmap_single(rp->pdev,
1189 rp->rx_skbuff_dma[i],
1190 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1191 dev_kfree_skb(rp->rx_skbuff[i]);
1193 rp->rx_skbuff[i] = NULL;
1197 static void alloc_tbufs(struct net_device* dev)
1199 struct rhine_private *rp = netdev_priv(dev);
1203 rp->dirty_tx = rp->cur_tx = 0;
1204 next = rp->tx_ring_dma;
1205 for (i = 0; i < TX_RING_SIZE; i++) {
1206 rp->tx_skbuff[i] = NULL;
1207 rp->tx_ring[i].tx_status = 0;
1208 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1209 next += sizeof(struct tx_desc);
1210 rp->tx_ring[i].next_desc = cpu_to_le32(next);
1211 if (rp->quirks & rqRhineI)
1212 rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
1214 rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
1218 static void free_tbufs(struct net_device* dev)
1220 struct rhine_private *rp = netdev_priv(dev);
1223 for (i = 0; i < TX_RING_SIZE; i++) {
1224 rp->tx_ring[i].tx_status = 0;
1225 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1226 rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1227 if (rp->tx_skbuff[i]) {
1228 if (rp->tx_skbuff_dma[i]) {
1229 pci_unmap_single(rp->pdev,
1230 rp->tx_skbuff_dma[i],
1231 rp->tx_skbuff[i]->len,
1234 dev_kfree_skb(rp->tx_skbuff[i]);
1236 rp->tx_skbuff[i] = NULL;
1237 rp->tx_buf[i] = NULL;
1241 static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1243 struct rhine_private *rp = netdev_priv(dev);
1244 void __iomem *ioaddr = rp->base;
1246 mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
1248 if (rp->mii_if.full_duplex)
1249 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1252 iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1255 netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1256 rp->mii_if.force_media, netif_carrier_ok(dev));
1259 /* Called after status of force_media possibly changed */
1260 static void rhine_set_carrier(struct mii_if_info *mii)
1262 struct net_device *dev = mii->dev;
1263 struct rhine_private *rp = netdev_priv(dev);
1265 if (mii->force_media) {
1266 /* autoneg is off: Link is always assumed to be up */
1267 if (!netif_carrier_ok(dev))
1268 netif_carrier_on(dev);
1269 } else /* Let MII library update carrier status */
1270 rhine_check_media(dev, 0);
1272 netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1273 mii->force_media, netif_carrier_ok(dev));
1277 * rhine_set_cam - set CAM multicast filters
1278 * @ioaddr: register block of this Rhine
1279 * @idx: multicast CAM index [0..MCAM_SIZE-1]
1280 * @addr: multicast address (6 bytes)
1282 * Load addresses into multicast filters.
1284 static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
1288 iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1291 /* Paranoid -- idx out of range should never happen */
1292 idx &= (MCAM_SIZE - 1);
1294 iowrite8((u8) idx, ioaddr + CamAddr);
1296 for (i = 0; i < 6; i++, addr++)
1297 iowrite8(*addr, ioaddr + MulticastFilter0 + i);
1301 iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1304 iowrite8(0, ioaddr + CamCon);
1308 * rhine_set_vlan_cam - set CAM VLAN filters
1309 * @ioaddr: register block of this Rhine
1310 * @idx: VLAN CAM index [0..VCAM_SIZE-1]
1311 * @addr: VLAN ID (2 bytes)
1313 * Load addresses into VLAN filters.
1315 static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
1317 iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1320 /* Paranoid -- idx out of range should never happen */
1321 idx &= (VCAM_SIZE - 1);
1323 iowrite8((u8) idx, ioaddr + CamAddr);
1325 iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
1329 iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1332 iowrite8(0, ioaddr + CamCon);
1336 * rhine_set_cam_mask - set multicast CAM mask
1337 * @ioaddr: register block of this Rhine
1338 * @mask: multicast CAM mask
1340 * Mask sets multicast filters active/inactive.
1342 static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
1344 iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1348 iowrite32(mask, ioaddr + CamMask);
1351 iowrite8(0, ioaddr + CamCon);
1355 * rhine_set_vlan_cam_mask - set VLAN CAM mask
1356 * @ioaddr: register block of this Rhine
1357 * @mask: VLAN CAM mask
1359 * Mask sets VLAN filters active/inactive.
1361 static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
1363 iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1367 iowrite32(mask, ioaddr + CamMask);
1370 iowrite8(0, ioaddr + CamCon);
1374 * rhine_init_cam_filter - initialize CAM filters
1375 * @dev: network device
1377 * Initialize (disable) hardware VLAN and multicast support on this
1380 static void rhine_init_cam_filter(struct net_device *dev)
1382 struct rhine_private *rp = netdev_priv(dev);
1383 void __iomem *ioaddr = rp->base;
1385 /* Disable all CAMs */
1386 rhine_set_vlan_cam_mask(ioaddr, 0);
1387 rhine_set_cam_mask(ioaddr, 0);
1389 /* disable hardware VLAN support */
1390 BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
1391 BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
1395 * rhine_update_vcam - update VLAN CAM filters
1396 * @dev: network device
1398 * Update VLAN CAM filters to match configuration change.
1400 static void rhine_update_vcam(struct net_device *dev)
1402 struct rhine_private *rp = netdev_priv(dev);
1403 void __iomem *ioaddr = rp->base;
1405 u32 vCAMmask = 0; /* 32 vCAMs (6105M and better) */
1408 for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
1409 rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
1411 if (++i >= VCAM_SIZE)
1414 rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
1417 static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
1419 struct rhine_private *rp = netdev_priv(dev);
1421 spin_lock_bh(&rp->lock);
1422 set_bit(vid, rp->active_vlans);
1423 rhine_update_vcam(dev);
1424 spin_unlock_bh(&rp->lock);
1428 static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
1430 struct rhine_private *rp = netdev_priv(dev);
1432 spin_lock_bh(&rp->lock);
1433 clear_bit(vid, rp->active_vlans);
1434 rhine_update_vcam(dev);
1435 spin_unlock_bh(&rp->lock);
1439 static void init_registers(struct net_device *dev)
1441 struct rhine_private *rp = netdev_priv(dev);
1442 void __iomem *ioaddr = rp->base;
1445 for (i = 0; i < 6; i++)
1446 iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1448 /* Initialize other registers. */
1449 iowrite16(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */
1450 /* Configure initial FIFO thresholds. */
1451 iowrite8(0x20, ioaddr + TxConfig);
1452 rp->tx_thresh = 0x20;
1453 rp->rx_thresh = 0x60; /* Written in rhine_set_rx_mode(). */
1455 iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1456 iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1458 rhine_set_rx_mode(dev);
1460 if (rp->pdev->revision >= VT6105M)
1461 rhine_init_cam_filter(dev);
1463 napi_enable(&rp->napi);
1465 iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);
1467 iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1469 rhine_check_media(dev, 1);
1472 /* Enable MII link status auto-polling (required for IntrLinkChange) */
1473 static void rhine_enable_linkmon(struct rhine_private *rp)
1475 void __iomem *ioaddr = rp->base;
1477 iowrite8(0, ioaddr + MIICmd);
1478 iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1479 iowrite8(0x80, ioaddr + MIICmd);
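/* 0x80 in MIICmd starts auto-polling of the MII register selected above
 * (BMSR); link changes then surface as IntrLinkChange. */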
1481 rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1483 iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
1486 /* Disable MII link status auto-polling (required for MDIO access) */
1487 static void rhine_disable_linkmon(struct rhine_private *rp)
1489 void __iomem *ioaddr = rp->base;
1491 iowrite8(0, ioaddr + MIICmd);
1493 if (rp->quirks & rqRhineI) {
1494 iowrite8(0x01, ioaddr + MIIRegAddr); /* MII_BMSR */
1496 /* Can be called from ISR. Evil. */
1499 /* 0x80 must be set immediately before turning it off */
1500 iowrite8(0x80, ioaddr + MIICmd);
1502 rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1504 /* Heh. Now clear 0x80 again. */
1505 iowrite8(0, ioaddr + MIICmd);
1508 rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
1511 /* Read and write over the MII Management Data I/O (MDIO) interface. */
1513 static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1515 struct rhine_private *rp = netdev_priv(dev);
1516 void __iomem *ioaddr = rp->base;
1519 rhine_disable_linkmon(rp);
1521 /* rhine_disable_linkmon already cleared MIICmd */
1522 iowrite8(phy_id, ioaddr + MIIPhyAddr);
1523 iowrite8(regnum, ioaddr + MIIRegAddr);
1524 iowrite8(0x40, ioaddr + MIICmd); /* Trigger read */
1525 rhine_wait_bit_low(rp, MIICmd, 0x40);
1526 result = ioread16(ioaddr + MIIData);
1528 rhine_enable_linkmon(rp);
1532 static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1534 struct rhine_private *rp = netdev_priv(dev);
1535 void __iomem *ioaddr = rp->base;
1537 rhine_disable_linkmon(rp);
1539 /* rhine_disable_linkmon already cleared MIICmd */
1540 iowrite8(phy_id, ioaddr + MIIPhyAddr);
1541 iowrite8(regnum, ioaddr + MIIRegAddr);
1542 iowrite16(value, ioaddr + MIIData);
1543 iowrite8(0x20, ioaddr + MIICmd); /* Trigger write */
1544 rhine_wait_bit_low(rp, MIICmd, 0x20);
1546 rhine_enable_linkmon(rp);
1549 static void rhine_task_disable(struct rhine_private *rp)
1551 mutex_lock(&rp->task_lock);
1552 rp->task_enable = false;
1553 mutex_unlock(&rp->task_lock);
1555 cancel_work_sync(&rp->slow_event_task);
1556 cancel_work_sync(&rp->reset_task);
1559 static void rhine_task_enable(struct rhine_private *rp)
1561 mutex_lock(&rp->task_lock);
1562 rp->task_enable = true;
1563 mutex_unlock(&rp->task_lock);
1566 static int rhine_open(struct net_device *dev)
1568 struct rhine_private *rp = netdev_priv(dev);
1569 void __iomem *ioaddr = rp->base;
1572 rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name,
1577 netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->pdev->irq);
1579 rc = alloc_ring(dev);
1581 free_irq(rp->pdev->irq, dev);
1586 rhine_chip_reset(dev);
1587 rhine_task_enable(rp);
1588 init_registers(dev);
1590 netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
1591 __func__, ioread16(ioaddr + ChipCmd),
1592 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1594 netif_start_queue(dev);
1599 static void rhine_reset_task(struct work_struct *work)
1601 struct rhine_private *rp = container_of(work, struct rhine_private,
1603 struct net_device *dev = rp->dev;
1605 mutex_lock(&rp->task_lock);
1607 if (!rp->task_enable)
1610 napi_disable(&rp->napi);
1611 spin_lock_bh(&rp->lock);
1613 /* clear all descriptors */
1619 /* Reinitialize the hardware. */
1620 rhine_chip_reset(dev);
1621 init_registers(dev);
1623 spin_unlock_bh(&rp->lock);
1625 dev->trans_start = jiffies; /* prevent tx timeout */
1626 dev->stats.tx_errors++;
1627 netif_wake_queue(dev);
1630 mutex_unlock(&rp->task_lock);
1633 static void rhine_tx_timeout(struct net_device *dev)
1635 struct rhine_private *rp = netdev_priv(dev);
1636 void __iomem *ioaddr = rp->base;
1638 netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
1639 ioread16(ioaddr + IntrStatus),
1640 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1642 schedule_work(&rp->reset_task);
1645 static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1646 struct net_device *dev)
1648 struct rhine_private *rp = netdev_priv(dev);
1649 void __iomem *ioaddr = rp->base;
1652 /* Caution: the write order is important here, set the field
1653 with the "ownership" bits last. */
1655 /* Calculate the next Tx descriptor entry. */
1656 entry = rp->cur_tx % TX_RING_SIZE;
1658 if (skb_padto(skb, ETH_ZLEN))
1659 return NETDEV_TX_OK;
1661 rp->tx_skbuff[entry] = skb;
1663 if ((rp->quirks & rqRhineI) &&
1664 (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
1665 /* Must use alignment buffer. */
1666 if (skb->len > PKT_BUF_SZ) {
1667 /* packet too long, drop it */
1669 rp->tx_skbuff[entry] = NULL;
1670 dev->stats.tx_dropped++;
1671 return NETDEV_TX_OK;
1674 /* Padding is not copied and so must be redone. */
1675 skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1676 if (skb->len < ETH_ZLEN)
1677 memset(rp->tx_buf[entry] + skb->len, 0,
1678 ETH_ZLEN - skb->len);
1679 rp->tx_skbuff_dma[entry] = 0;
1680 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1681 (rp->tx_buf[entry] -
1684 rp->tx_skbuff_dma[entry] =
1685 pci_map_single(rp->pdev, skb->data, skb->len,
1687 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1690 rp->tx_ring[entry].desc_length =
1691 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1693 if (unlikely(vlan_tx_tag_present(skb))) {
1694 rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16);
1695 /* request tagging */
1696 rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
1699 rp->tx_ring[entry].tx_status = 0;
1703 rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
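/* The store above hands the descriptor (DescOwn) to the chip; per the
 * ordering note at the top of this function it must be the last write,
 * and the CPU must not touch the descriptor afterwards. */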
1708 /* Non-x86 Todo: explicitly flush cache lines here. */
1710 if (vlan_tx_tag_present(skb))
1711 /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1712 BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1714 /* Wake the potentially-idle transmit channel */
1715 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1719 if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
1720 netif_stop_queue(dev);
1722 netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
1723 rp->cur_tx - 1, entry);
1725 return NETDEV_TX_OK;
1728 static void rhine_irq_disable(struct rhine_private *rp)
1730 iowrite16(0x0000, rp->base + IntrEnable);
1734 /* The interrupt handler does all of the Rx thread work and cleans up
1735 after the Tx thread. */
1736 static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1738 struct net_device *dev = dev_instance;
1739 struct rhine_private *rp = netdev_priv(dev);
1743 status = rhine_get_events(rp);
1745 netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);
1747 if (status & RHINE_EVENT) {
1750 rhine_irq_disable(rp);
1751 napi_schedule(&rp->napi);
1754 if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
1755 netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
1759 return IRQ_RETVAL(handled);
1762 /* This routine is logically part of the interrupt handler, but isolated
1764 static void rhine_tx(struct net_device *dev)
1766 struct rhine_private *rp = netdev_priv(dev);
1767 int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
1769 /* find and cleanup dirty tx descriptors */
1770 while (rp->dirty_tx != rp->cur_tx) {
1771 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1772 netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
1774 if (txstatus & DescOwn)
1776 if (txstatus & 0x8000) {
1777 netif_dbg(rp, tx_done, dev,
1778 "Transmit error, Tx status %08x\n", txstatus);
1779 dev->stats.tx_errors++;
1780 if (txstatus & 0x0400)
1781 dev->stats.tx_carrier_errors++;
1782 if (txstatus & 0x0200)
1783 dev->stats.tx_window_errors++;
1784 if (txstatus & 0x0100)
1785 dev->stats.tx_aborted_errors++;
1786 if (txstatus & 0x0080)
1787 dev->stats.tx_heartbeat_errors++;
1788 if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1789 (txstatus & 0x0800) || (txstatus & 0x1000)) {
1790 dev->stats.tx_fifo_errors++;
1791 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1792 break; /* Keep the skb - we try again */
1794 /* Transmitter restarted in 'abnormal' handler. */
1796 if (rp->quirks & rqRhineI)
1797 dev->stats.collisions += (txstatus >> 3) & 0x0F;
1799 dev->stats.collisions += txstatus & 0x0F;
1800 netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
1801 (txstatus >> 3) & 0xF, txstatus & 0xF);
1803 u64_stats_update_begin(&rp->tx_stats.syncp);
1804 rp->tx_stats.bytes += rp->tx_skbuff[entry]->len;
1805 rp->tx_stats.packets++;
1806 u64_stats_update_end(&rp->tx_stats.syncp);
1808 /* Free the original skb. */
1809 if (rp->tx_skbuff_dma[entry]) {
1810 pci_unmap_single(rp->pdev,
1811 rp->tx_skbuff_dma[entry],
1812 rp->tx_skbuff[entry]->len,
1815 dev_kfree_skb(rp->tx_skbuff[entry]);
1816 rp->tx_skbuff[entry] = NULL;
1817 entry = (++rp->dirty_tx) % TX_RING_SIZE;
1819 if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
1820 netif_wake_queue(dev);
1824 * rhine_get_vlan_tci - extract TCI from Rx data buffer
1825 * @skb: pointer to sk_buff
1826 * @data_size: used data area of the buffer including CRC
1828 * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
1829 * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
1830 * aligned following the CRC.
1832 static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
1834 u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
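/* Round the used data area up to the next 4-byte boundary (start of the
 * 802.1Q header) and skip the 2-byte TPID to land on the TCI. */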
1835 return be16_to_cpup((__be16 *)trailer);
1838 /* Process up to limit frames from receive ring */
1839 static int rhine_rx(struct net_device *dev, int limit)
1841 struct rhine_private *rp = netdev_priv(dev);
1843 int entry = rp->cur_rx % RX_RING_SIZE;
1845 netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
1846 entry, le32_to_cpu(rp->rx_head_desc->rx_status));
1848 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1849 for (count = 0; count < limit; ++count) {
1850 struct rx_desc *desc = rp->rx_head_desc;
1851 u32 desc_status = le32_to_cpu(desc->rx_status);
1852 u32 desc_length = le32_to_cpu(desc->desc_length);
1853 int data_size = desc_status >> 16;
1855 if (desc_status & DescOwn)
1858 netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
1861 if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
1862 if ((desc_status & RxWholePkt) != RxWholePkt) {
1864 "Oversized Ethernet frame spanned multiple buffers, "
1865 "entry %#x length %d status %08x!\n",
1869 "Oversized Ethernet frame %p vs %p\n",
1871 &rp->rx_ring[entry]);
1872 dev->stats.rx_length_errors++;
1873 } else if (desc_status & RxErr) {
1874 /* There was an error. */
1875 netif_dbg(rp, rx_err, dev,
1876 "%s() Rx error %08x\n", __func__,
1878 dev->stats.rx_errors++;
1879 if (desc_status & 0x0030)
1880 dev->stats.rx_length_errors++;
1881 if (desc_status & 0x0048)
1882 dev->stats.rx_fifo_errors++;
1883 if (desc_status & 0x0004)
1884 dev->stats.rx_frame_errors++;
1885 if (desc_status & 0x0002) {
1886 /* this can also be updated outside the interrupt handler */
1887 spin_lock(&rp->lock);
1888 dev->stats.rx_crc_errors++;
1889 spin_unlock(&rp->lock);
1893 struct sk_buff *skb = NULL;
1894 /* Length should omit the CRC */
1895 int pkt_len = data_size - 4;
1898 /* Check if the packet is long enough to accept without
1899 copying to a minimally-sized skbuff. */
1900 if (pkt_len < rx_copybreak)
1901 skb = netdev_alloc_skb_ip_align(dev, pkt_len);
1903 pci_dma_sync_single_for_cpu(rp->pdev,
1904 rp->rx_skbuff_dma[entry],
1906 PCI_DMA_FROMDEVICE);
1908 skb_copy_to_linear_data(skb,
1909 rp->rx_skbuff[entry]->data,
1911 skb_put(skb, pkt_len);
1912 pci_dma_sync_single_for_device(rp->pdev,
1913 rp->rx_skbuff_dma[entry],
1915 PCI_DMA_FROMDEVICE);
1917 skb = rp->rx_skbuff[entry];
1919 netdev_err(dev, "Inconsistent Rx descriptor chain\n");
1922 rp->rx_skbuff[entry] = NULL;
1923 skb_put(skb, pkt_len);
1924 pci_unmap_single(rp->pdev,
1925 rp->rx_skbuff_dma[entry],
1927 PCI_DMA_FROMDEVICE);
1930 if (unlikely(desc_length & DescTag))
1931 vlan_tci = rhine_get_vlan_tci(skb, data_size);
1933 skb->protocol = eth_type_trans(skb, dev);
1935 if (unlikely(desc_length & DescTag))
1936 __vlan_hwaccel_put_tag(skb, vlan_tci);
1937 netif_receive_skb(skb);
1939 u64_stats_update_begin(&rp->rx_stats.syncp);
1940 rp->rx_stats.bytes += pkt_len;
1941 rp->rx_stats.packets++;
1942 u64_stats_update_end(&rp->rx_stats.syncp);
1944 entry = (++rp->cur_rx) % RX_RING_SIZE;
1945 rp->rx_head_desc = &rp->rx_ring[entry];
1948 /* Refill the Rx ring buffers. */
1949 for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
1950 struct sk_buff *skb;
1951 entry = rp->dirty_rx % RX_RING_SIZE;
1952 if (rp->rx_skbuff[entry] == NULL) {
1953 skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
1954 rp->rx_skbuff[entry] = skb;
1956 break; /* Better luck next round. */
1957 rp->rx_skbuff_dma[entry] =
1958 pci_map_single(rp->pdev, skb->data,
1960 PCI_DMA_FROMDEVICE);
1961 rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
1963 rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
1969 static void rhine_restart_tx(struct net_device *dev) {
1970 struct rhine_private *rp = netdev_priv(dev);
1971 void __iomem *ioaddr = rp->base;
1972 int entry = rp->dirty_tx % TX_RING_SIZE;
1976 * If new errors occurred, we need to sort them out before doing Tx.
1977 * In that case the ISR will be back here RSN anyway.
1979 intr_status = rhine_get_events(rp);
1981 if ((intr_status & IntrTxErrSummary) == 0) {
1983 /* We know better than the chip where it should continue. */
1984 iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
1985 ioaddr + TxRingPtr);
1987 iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
1990 if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
1991 /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1992 BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1994 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1999 /* This should never happen */
2000 netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
2006 static void rhine_slow_event_task(struct work_struct *work)
2008 struct rhine_private *rp =
2009 container_of(work, struct rhine_private, slow_event_task);
2010 struct net_device *dev = rp->dev;
2013 mutex_lock(&rp->task_lock);
2015 if (!rp->task_enable)
2018 intr_status = rhine_get_events(rp);
2019 rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);
2021 if (intr_status & IntrLinkChange)
2022 rhine_check_media(dev, 0);
2024 if (intr_status & IntrPCIErr)
2025 netif_warn(rp, hw, dev, "PCI error\n");
2027 iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
2030 mutex_unlock(&rp->task_lock);
2033 static struct rtnl_link_stats64 *
2034 rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
2036 struct rhine_private *rp = netdev_priv(dev);
2039 spin_lock_bh(&rp->lock);
2040 rhine_update_rx_crc_and_missed_errors(rp);
2041 spin_unlock_bh(&rp->lock);
2043 netdev_stats_to_stats64(stats, &dev->stats);
2046 start = u64_stats_fetch_begin_bh(&rp->rx_stats.syncp);
2047 stats->rx_packets = rp->rx_stats.packets;
2048 stats->rx_bytes = rp->rx_stats.bytes;
2049 } while (u64_stats_fetch_retry_bh(&rp->rx_stats.syncp, start));
2052 start = u64_stats_fetch_begin_bh(&rp->tx_stats.syncp);
2053 stats->tx_packets = rp->tx_stats.packets;
2054 stats->tx_bytes = rp->tx_stats.bytes;
2055 } while (u64_stats_fetch_retry_bh(&rp->tx_stats.syncp, start));
2060 static void rhine_set_rx_mode(struct net_device *dev)
2062 struct rhine_private *rp = netdev_priv(dev);
2063 void __iomem *ioaddr = rp->base;
2064 u32 mc_filter[2]; /* Multicast hash filter */
2065 u8 rx_mode = 0x0C; /* Note: 0x02=accept runt, 0x01=accept errs */
2066 struct netdev_hw_addr *ha;
2068 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
2070 iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2071 iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2072 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
2073 (dev->flags & IFF_ALLMULTI)) {
2074 /* Too many to match, or accept all multicasts. */
2075 iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2076 iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2077 } else if (rp->pdev->revision >= VT6105M) {
2079 u32 mCAMmask = 0; /* 32 mCAMs (6105M and better) */
2080 netdev_for_each_mc_addr(ha, dev) {
2083 rhine_set_cam(ioaddr, i, ha->addr);
2087 rhine_set_cam_mask(ioaddr, mCAMmask);
2089 memset(mc_filter, 0, sizeof(mc_filter));
2090 netdev_for_each_mc_addr(ha, dev) {
2091 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
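/* The top 6 bits of the CRC select one of the 64 hash bits, spread
 * across the two 32-bit multicast filter registers. */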
2093 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
2095 iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
2096 iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
2098 /* enable/disable VLAN receive filtering */
2099 if (rp->pdev->revision >= VT6105M) {
2100 if (dev->flags & IFF_PROMISC)
2101 BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2103 BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2105 BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
2108 static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2110 struct rhine_private *rp = netdev_priv(dev);
2112 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2113 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2114 strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
2117 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2119 struct rhine_private *rp = netdev_priv(dev);
2122 mutex_lock(&rp->task_lock);
2123 rc = mii_ethtool_gset(&rp->mii_if, cmd);
2124 mutex_unlock(&rp->task_lock);
2129 static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2131 struct rhine_private *rp = netdev_priv(dev);
2134 mutex_lock(&rp->task_lock);
2135 rc = mii_ethtool_sset(&rp->mii_if, cmd);
2136 rhine_set_carrier(&rp->mii_if);
2137 mutex_unlock(&rp->task_lock);
2142 static int netdev_nway_reset(struct net_device *dev)
2144 struct rhine_private *rp = netdev_priv(dev);
2146 return mii_nway_restart(&rp->mii_if);
2149 static u32 netdev_get_link(struct net_device *dev)
2151 struct rhine_private *rp = netdev_priv(dev);
2153 return mii_link_ok(&rp->mii_if);
2156 static u32 netdev_get_msglevel(struct net_device *dev)
2158 struct rhine_private *rp = netdev_priv(dev);
2160 return rp->msg_enable;
2163 static void netdev_set_msglevel(struct net_device *dev, u32 value)
2165 struct rhine_private *rp = netdev_priv(dev);
2167 rp->msg_enable = value;
2170 static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2172 struct rhine_private *rp = netdev_priv(dev);
2174 if (!(rp->quirks & rqWOL))
2177 spin_lock_irq(&rp->lock);
2178 wol->supported = WAKE_PHY | WAKE_MAGIC |
2179 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
2180 wol->wolopts = rp->wolopts;
2181 spin_unlock_irq(&rp->lock);
2184 static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2186 struct rhine_private *rp = netdev_priv(dev);
2187 u32 support = WAKE_PHY | WAKE_MAGIC |
2188 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
2190 if (!(rp->quirks & rqWOL))
2193 if (wol->wolopts & ~support)
2196 spin_lock_irq(&rp->lock);
2197 rp->wolopts = wol->wolopts;
2198 spin_unlock_irq(&rp->lock);
2203 static const struct ethtool_ops netdev_ethtool_ops = {
2204 .get_drvinfo = netdev_get_drvinfo,
2205 .get_settings = netdev_get_settings,
2206 .set_settings = netdev_set_settings,
2207 .nway_reset = netdev_nway_reset,
2208 .get_link = netdev_get_link,
2209 .get_msglevel = netdev_get_msglevel,
2210 .set_msglevel = netdev_set_msglevel,
2211 .get_wol = rhine_get_wol,
2212 .set_wol = rhine_set_wol,
2215 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2217 struct rhine_private *rp = netdev_priv(dev);
2220 if (!netif_running(dev))
2223 mutex_lock(&rp->task_lock);
2224 rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
2225 rhine_set_carrier(&rp->mii_if);
2226 mutex_unlock(&rp->task_lock);
2231 static int rhine_close(struct net_device *dev)
2233 struct rhine_private *rp = netdev_priv(dev);
2234 void __iomem *ioaddr = rp->base;
2236 rhine_task_disable(rp);
2237 napi_disable(&rp->napi);
2238 netif_stop_queue(dev);
2240 netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
2241 ioread16(ioaddr + ChipCmd));
2243 /* Switch to loopback mode to avoid hardware races. */
2244 iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
2246 rhine_irq_disable(rp);
2248 /* Stop the chip's Tx and Rx processes. */
2249 iowrite16(CmdStop, ioaddr + ChipCmd);
2251 free_irq(rp->pdev->irq, dev);
2260 static void rhine_remove_one(struct pci_dev *pdev)
2262 struct net_device *dev = pci_get_drvdata(pdev);
2263 struct rhine_private *rp = netdev_priv(dev);
2265 unregister_netdev(dev);
2267 pci_iounmap(pdev, rp->base);
2268 pci_release_regions(pdev);
2271 pci_disable_device(pdev);
2272 pci_set_drvdata(pdev, NULL);
2275 static void rhine_shutdown (struct pci_dev *pdev)
2277 struct net_device *dev = pci_get_drvdata(pdev);
2278 struct rhine_private *rp = netdev_priv(dev);
2279 void __iomem *ioaddr = rp->base;
2281 if (!(rp->quirks & rqWOL))
2282 return; /* Nothing to do for non-WOL adapters */
2284 rhine_power_init(dev);
2286 /* Make sure we use pattern 0, 1 and not 4, 5 */
2287 if (rp->quirks & rq6patterns)
2288 iowrite8(0x04, ioaddr + WOLcgClr);
2290 spin_lock(&rp->lock);
2292 if (rp->wolopts & WAKE_MAGIC) {
2293 iowrite8(WOLmagic, ioaddr + WOLcrSet);
2295 * Turn EEPROM-controlled wake-up back on -- some hardware may
2296 * not cooperate otherwise.
2298 iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
2301 if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
2302 iowrite8(WOLbmcast, ioaddr + WOLcgSet);
2304 if (rp->wolopts & WAKE_PHY)
2305 iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
2307 if (rp->wolopts & WAKE_UCAST)
2308 iowrite8(WOLucast, ioaddr + WOLcrSet);
2311 /* Enable legacy WOL (for old motherboards) */
2312 iowrite8(0x01, ioaddr + PwcfgSet);
2313 iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
2316 spin_unlock(&rp->lock);
2318 if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
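/* StickyHW bits 1:0 hold the power state; 0x03 requests D3
 * (rhine_power_init clears these bits to force D0). */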
2319 iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
2321 pci_wake_from_d3(pdev, true);
2322 pci_set_power_state(pdev, PCI_D3hot);
2326 #ifdef CONFIG_PM_SLEEP
2327 static int rhine_suspend(struct device *device)
2329 struct pci_dev *pdev = to_pci_dev(device);
2330 struct net_device *dev = pci_get_drvdata(pdev);
2331 struct rhine_private *rp = netdev_priv(dev);
2333 if (!netif_running(dev))
2336 rhine_task_disable(rp);
2337 rhine_irq_disable(rp);
2338 napi_disable(&rp->napi);
2340 netif_device_detach(dev);
2342 rhine_shutdown(pdev);
2347 static int rhine_resume(struct device *device)
2349 struct pci_dev *pdev = to_pci_dev(device);
2350 struct net_device *dev = pci_get_drvdata(pdev);
2351 struct rhine_private *rp = netdev_priv(dev);
2353 if (!netif_running(dev))
2357 enable_mmio(rp->pioaddr, rp->quirks);
2359 rhine_power_init(dev);
2364 rhine_task_enable(rp);
2365 spin_lock_bh(&rp->lock);
2366 init_registers(dev);
2367 spin_unlock_bh(&rp->lock);
2369 netif_device_attach(dev);
2374 static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
2375 #define RHINE_PM_OPS (&rhine_pm_ops)
2379 #define RHINE_PM_OPS NULL
2381 #endif /* !CONFIG_PM_SLEEP */
2383 static struct pci_driver rhine_driver = {
2385 .id_table = rhine_pci_tbl,
2386 .probe = rhine_init_one,
2387 .remove = rhine_remove_one,
2388 .shutdown = rhine_shutdown,
2389 .driver.pm = RHINE_PM_OPS,
2392 static struct dmi_system_id __initdata rhine_dmi_table[] = {
2396 DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
2397 DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2403 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
2404 DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2410 static int __init rhine_init(void)
2412 /* when a module, this is printed whether or not devices are found in probe */
2414 pr_info("%s\n", version);
2416 if (dmi_check_system(rhine_dmi_table)) {
2417 /* these BIOSes fail at PXE boot if chip is in D3 */
2419 pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
2422 pr_info("avoid_D3 set\n");
2424 return pci_register_driver(&rhine_driver);
2428 static void __exit rhine_cleanup(void)
2430 pci_unregister_driver(&rhine_driver);
2434 module_init(rhine_init);
2435 module_exit(rhine_cleanup);