1 /* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
3 Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com>
5 Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c]
6 Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
7 Copyright 2001 Manfred Spraul [natsemi.c]
8 Copyright 1999-2001 by Donald Becker. [natsemi.c]
9 Written 1997-2001 by Donald Becker. [8139too.c]
10 Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]
12 This software may be used and distributed according to the terms of
13 the GNU General Public License (GPL), incorporated herein by reference.
14 Drivers based on or derived from this code fall under the GPL and must
15 retain the authorship, copyright and license notice. This file is not
16 a complete program and may only be used when the entire operating
17 system is licensed under the GPL.
19 See the file COPYING in this distribution for more information.
23 Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
24 PCI suspend/resume - Felipe Damasio <felipewd@terra.com.br>
25 LinkChg interrupt - Felipe Damasio <felipewd@terra.com.br>
28 * Test Tx checksumming thoroughly
29 * Implement dev->tx_timeout
32 * Complete reset on PciErr
33 * Consider Rx interrupt mitigation using TimerIntr
34 * Investigate using skb->priority with h/w VLAN priority
35 * Investigate using High Priority Tx Queue with skb->priority
36 * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
37 * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
38 * Implement Tx software interrupt mitigation via
40 * The real minimum of CP_MIN_MTU is 4 bytes. However,
41 for this to be supported, one must(?) turn on packet padding.
42 * Support external MII transceivers (patch available)
45 * TX checksumming is considered experimental. It is off by
46 default; use ethtool to turn it on.
50 #define DRV_NAME "8139cp"
51 #define DRV_VERSION "1.2"
52 #define DRV_RELDATE "Mar 22, 2004"
55 #include <linux/config.h>
56 #include <linux/module.h>
57 #include <linux/moduleparam.h>
58 #include <linux/kernel.h>
59 #include <linux/compiler.h>
60 #include <linux/netdevice.h>
61 #include <linux/etherdevice.h>
62 #include <linux/init.h>
63 #include <linux/pci.h>
64 #include <linux/delay.h>
65 #include <linux/ethtool.h>
66 #include <linux/mii.h>
67 #include <linux/if_vlan.h>
68 #include <linux/crc32.h>
71 #include <linux/tcp.h>
72 #include <linux/udp.h>
73 #include <linux/cache.h>
76 #include <asm/uaccess.h>
78 /* VLAN tagging feature enable/disable */
79 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
80 #define CP_VLAN_TAG_USED 1
81 #define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
82 do { (tx_desc)->opts2 = (vlan_tag_value); } while (0)
83 #else
84 #define CP_VLAN_TAG_USED 0
85 #define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
86 do { (tx_desc)->opts2 = 0; } while (0)
87 #endif
89 /* These identify the driver base version and may not be removed. */
90 static char version[] =
91 KERN_INFO DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
93 MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
94 MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
95 MODULE_VERSION(DRV_VERSION);
96 MODULE_LICENSE("GPL");
98 static int debug = -1;
99 module_param(debug, int, 0);
100 MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");
102 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
103 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
104 static int multicast_filter_limit = 32;
105 module_param(multicast_filter_limit, int, 0);
106 MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
108 #define PFX DRV_NAME ": "
110 #ifndef TRUE
111 #define FALSE 0
112 #define TRUE (!FALSE)
113 #endif
115 #define CP_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
118 #define CP_NUM_STATS 14 /* struct cp_dma_stats, plus one */
119 #define CP_STATS_SIZE 64 /* size in bytes of DMA stats block */
120 #define CP_REGS_SIZE (0xff + 1)
121 #define CP_REGS_VER 1 /* version 1 */
122 #define CP_RX_RING_SIZE 64
123 #define CP_TX_RING_SIZE 64
124 #define CP_RING_BYTES \
125 ((sizeof(struct cp_desc) * CP_RX_RING_SIZE) + \
126 (sizeof(struct cp_desc) * CP_TX_RING_SIZE) + \
128 #define NEXT_TX(N) (((N) + 1) & (CP_TX_RING_SIZE - 1))
129 #define NEXT_RX(N) (((N) + 1) & (CP_RX_RING_SIZE - 1))
130 #define TX_BUFFS_AVAIL(CP) \
131 (((CP)->tx_tail <= (CP)->tx_head) ? \
132 (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head : \
133 (CP)->tx_tail - (CP)->tx_head - 1)
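/* A quick sanity check on the ring arithmetic above: both ring sizes are
 * powers of two, so NEXT_TX/NEXT_RX wrap an index with a simple mask rather
 * than a modulo.  TX_BUFFS_AVAIL deliberately keeps one descriptor unused so
 * that head == tail always means "empty", never "full": with
 * tx_head == tx_tail it reports CP_TX_RING_SIZE - 1 (63) free slots, and
 * with tx_head one slot behind tx_tail it reports 0.
 */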
135 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
137 #define CP_INTERNAL_PHY 32
139 /* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */
140 #define RX_FIFO_THRESH 5 /* Rx buffer level before first PCI xfer. */
141 #define RX_DMA_BURST 4 /* Maximum PCI burst, '4' is 256 */
142 #define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
143 #define TX_EARLY_THRESH 256 /* Early Tx threshold, in bytes */
145 /* Time in jiffies before concluding the transmitter is hung. */
146 #define TX_TIMEOUT (6*HZ)
148 /* hardware minimum and maximum for a single frame's data payload */
149 #define CP_MIN_MTU 60 /* TODO: allow lower, but pad */
150 #define CP_MAX_MTU 4096
153 /* NIC register offsets */
154 MAC0 = 0x00, /* Ethernet hardware address. */
155 MAR0 = 0x08, /* Multicast filter. */
156 StatsAddr = 0x10, /* 64-bit start addr of 64-byte DMA stats blk */
157 TxRingAddr = 0x20, /* 64-bit start addr of Tx ring */
158 HiTxRingAddr = 0x28, /* 64-bit start addr of high priority Tx ring */
159 Cmd = 0x37, /* Command register */
160 IntrMask = 0x3C, /* Interrupt mask */
161 IntrStatus = 0x3E, /* Interrupt status */
162 TxConfig = 0x40, /* Tx configuration */
163 ChipVersion = 0x43, /* 8-bit chip version, inside TxConfig */
164 RxConfig = 0x44, /* Rx configuration */
165 RxMissed = 0x4C, /* 24 bits valid, write clears */
166 Cfg9346 = 0x50, /* EEPROM select/control; Cfg reg [un]lock */
167 Config1 = 0x52, /* Config1 */
168 Config3 = 0x59, /* Config3 */
169 Config4 = 0x5A, /* Config4 */
170 MultiIntr = 0x5C, /* Multiple interrupt select */
171 BasicModeCtrl = 0x62, /* MII BMCR */
172 BasicModeStatus = 0x64, /* MII BMSR */
173 NWayAdvert = 0x66, /* MII ADVERTISE */
174 NWayLPAR = 0x68, /* MII LPA */
175 NWayExpansion = 0x6A, /* MII Expansion */
176 Config5 = 0xD8, /* Config5 */
177 TxPoll = 0xD9, /* Tell chip to check Tx descriptors for work */
178 RxMaxSize = 0xDA, /* Max size of an Rx packet (8169 only) */
179 CpCmd = 0xE0, /* C+ Command register (C+ mode only) */
180 IntrMitigate = 0xE2, /* rx/tx interrupt mitigation control */
181 RxRingAddr = 0xE4, /* 64-bit start addr of Rx ring */
182 TxThresh = 0xEC, /* Early Tx threshold */
183 OldRxBufAddr = 0x30, /* DMA address of Rx ring buffer (C mode) */
184 OldTSD0 = 0x10, /* DMA address of first Tx desc (C mode) */
186 /* Tx and Rx status descriptors */
187 DescOwn = (1 << 31), /* Descriptor is owned by NIC */
188 RingEnd = (1 << 30), /* End of descriptor ring */
189 FirstFrag = (1 << 29), /* First segment of a packet */
190 LastFrag = (1 << 28), /* Final segment of a packet */
191 LargeSend = (1 << 27), /* TCP Large Send Offload (TSO) */
192 MSSShift = 16, /* MSS value position */
193 MSSMask = 0xfff, /* MSS value: 11 bits */
194 TxError = (1 << 23), /* Tx error summary */
195 RxError = (1 << 20), /* Rx error summary */
196 IPCS = (1 << 18), /* Calculate IP checksum */
197 UDPCS = (1 << 17), /* Calculate UDP/IP checksum */
198 TCPCS = (1 << 16), /* Calculate TCP/IP checksum */
199 TxVlanTag = (1 << 17), /* Add VLAN tag */
200 RxVlanTagged = (1 << 16), /* Rx VLAN tag available */
201 IPFail = (1 << 15), /* IP checksum failed */
202 UDPFail = (1 << 14), /* UDP/IP checksum failed */
203 TCPFail = (1 << 13), /* TCP/IP checksum failed */
204 NormalTxPoll = (1 << 6), /* One or more normal Tx packets to send */
205 PID1 = (1 << 17), /* 2 protocol id bits: 0==non-IP, */
206 PID0 = (1 << 16), /* 1==UDP/IP, 2==TCP/IP, 3==IP */
210 TxFIFOUnder = (1 << 25), /* Tx FIFO underrun */
211 TxOWC = (1 << 22), /* Tx Out-of-window collision */
212 TxLinkFail = (1 << 21), /* Link failed during Tx of packet */
213 TxMaxCol = (1 << 20), /* Tx aborted due to excessive collisions */
214 TxColCntShift = 16, /* Shift, to get 4-bit Tx collision cnt */
215 TxColCntMask = 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */
216 RxErrFrame = (1 << 27), /* Rx frame alignment error */
217 RxMcast = (1 << 26), /* Rx multicast packet rcv'd */
218 RxErrCRC = (1 << 18), /* Rx CRC error */
219 RxErrRunt = (1 << 19), /* Rx error, packet < 64 bytes */
220 RxErrLong = (1 << 21), /* Rx error, packet > 4096 bytes */
221 RxErrFIFO = (1 << 22), /* Rx error, FIFO overflowed, pkt bad */
223 /* StatsAddr register */
224 DumpStats = (1 << 3), /* Begin stats dump */
226 /* RxConfig register */
227 RxCfgFIFOShift = 13, /* Shift, to get Rx FIFO thresh value */
228 RxCfgDMAShift = 8, /* Shift, to get Rx Max DMA value */
229 AcceptErr = 0x20, /* Accept packets with CRC errors */
230 AcceptRunt = 0x10, /* Accept runt (<64 bytes) packets */
231 AcceptBroadcast = 0x08, /* Accept broadcast packets */
232 AcceptMulticast = 0x04, /* Accept multicast packets */
233 AcceptMyPhys = 0x02, /* Accept pkts with our MAC as dest */
234 AcceptAllPhys = 0x01, /* Accept all pkts w/ physical dest */
236 /* IntrMask / IntrStatus registers */
237 PciErr = (1 << 15), /* System error on the PCI bus */
238 TimerIntr = (1 << 14), /* Asserted when TCTR reaches TimerInt value */
239 LenChg = (1 << 13), /* Cable length change */
240 SWInt = (1 << 8), /* Software-requested interrupt */
241 TxEmpty = (1 << 7), /* No Tx descriptors available */
242 RxFIFOOvr = (1 << 6), /* Rx FIFO Overflow */
243 LinkChg = (1 << 5), /* Packet underrun, or link change */
244 RxEmpty = (1 << 4), /* No Rx descriptors available */
245 TxErr = (1 << 3), /* Tx error */
246 TxOK = (1 << 2), /* Tx packet sent */
247 RxErr = (1 << 1), /* Rx error */
248 RxOK = (1 << 0), /* Rx packet received */
249 IntrResvd = (1 << 10), /* reserved, according to RealTek engineers,
250 but hardware likes to raise it */
252 IntrAll = PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
253 RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
254 RxErr | RxOK | IntrResvd,
256 /* C mode command register */
257 CmdReset = (1 << 4), /* Enable to reset; self-clearing */
258 RxOn = (1 << 3), /* Rx mode enable */
259 TxOn = (1 << 2), /* Tx mode enable */
261 /* C+ mode command register */
262 RxVlanOn = (1 << 6), /* Rx VLAN de-tagging enable */
263 RxChkSum = (1 << 5), /* Rx checksum offload enable */
264 PCIDAC = (1 << 4), /* PCI Dual Address Cycle (64-bit PCI) */
265 PCIMulRW = (1 << 3), /* Enable PCI read/write multiple */
266 CpRxOn = (1 << 1), /* Rx mode enable */
267 CpTxOn = (1 << 0), /* Tx mode enable */
269 /* Cfg9346 EEPROM control register */
270 Cfg9346_Lock = 0x00, /* Lock ConfigX/MII register access */
271 Cfg9346_Unlock = 0xC0, /* Unlock ConfigX/MII register access */
273 /* TxConfig register */
274 IFG = (1 << 25) | (1 << 24), /* standard IEEE interframe gap */
275 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
277 /* Early Tx Threshold register */
278 TxThreshMask = 0x3f, /* Mask bits 5-0 */
279 TxThreshMax = 2048, /* Max early Tx threshold */
281 /* Config1 register */
282 DriverLoaded = (1 << 5), /* Software marker, driver is loaded */
283 LWACT = (1 << 4), /* LWAKE active mode */
284 PMEnable = (1 << 0), /* Enable various PM features of chip */
286 /* Config3 register */
287 PARMEnable = (1 << 6), /* Enable auto-loading of PHY parms */
288 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
289 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
291 /* Config4 register */
292 LWPTN = (1 << 1), /* LWAKE Pattern */
293 LWPME = (1 << 4), /* LANWAKE vs PMEB */
295 /* Config5 register */
296 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
297 MWF = (1 << 5), /* Accept Multicast wakeup frame */
298 UWF = (1 << 4), /* Accept Unicast wakeup frame */
299 LANWake = (1 << 1), /* Enable LANWake signal */
300 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
302 cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
303 cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
304 cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
307 static const unsigned int cp_rx_config =
308 (RX_FIFO_THRESH << RxCfgFIFOShift) |
309 (RX_DMA_BURST << RxCfgDMAShift);
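/* With the defaults above this works out to (5 << 13) | (4 << 8) = 0xa400.
 * The Accept* mode bits from the RxConfig enum are OR'd in later by
 * __cp_set_rx_mode() before the combined value is written to RxConfig.
 */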
323 struct cp_dma_stats {
337 } __attribute__((packed));
339 struct cp_extra_stats {
340 unsigned long rx_frags;
345 struct net_device *dev;
349 struct pci_dev *pdev;
353 struct net_device_stats net_stats;
354 struct cp_extra_stats cp_stats;
355 struct cp_dma_stats *nic_stats;
356 dma_addr_t nic_stats_dma;
358 unsigned rx_tail ____cacheline_aligned;
359 struct cp_desc *rx_ring;
360 struct ring_info rx_skb[CP_RX_RING_SIZE];
363 unsigned tx_head ____cacheline_aligned;
366 struct cp_desc *tx_ring;
367 struct ring_info tx_skb[CP_TX_RING_SIZE];
371 struct vlan_group *vlgrp;
374 unsigned int wol_enabled : 1; /* Is Wake-on-LAN enabled? */
376 struct mii_if_info mii_if;
379 #define cpr8(reg) readb(cp->regs + (reg))
380 #define cpr16(reg) readw(cp->regs + (reg))
381 #define cpr32(reg) readl(cp->regs + (reg))
382 #define cpw8(reg,val) writeb((val), cp->regs + (reg))
383 #define cpw16(reg,val) writew((val), cp->regs + (reg))
384 #define cpw32(reg,val) writel((val), cp->regs + (reg))
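/* The *_f ("flush") variants below are presumably there to cope with posted
 * MMIO writes: writing the register and then immediately reading it back
 * forces the write out of any intervening PCI write buffers before the macro
 * returns, which matters when the next step depends on the write having
 * actually reached the chip.
 */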
385 #define cpw8_f(reg,val) do { \
386 writeb((val), cp->regs + (reg)); \
387 readb(cp->regs + (reg)); \
389 #define cpw16_f(reg,val) do { \
390 writew((val), cp->regs + (reg)); \
391 readw(cp->regs + (reg)); \
393 #define cpw32_f(reg,val) do { \
394 writel((val), cp->regs + (reg)); \
395 readl(cp->regs + (reg)); \
399 static void __cp_set_rx_mode (struct net_device *dev);
400 static void cp_tx (struct cp_private *cp);
401 static void cp_clean_rings (struct cp_private *cp);
402 #ifdef CONFIG_NET_POLL_CONTROLLER
403 static void cp_poll_controller(struct net_device *dev);
406 static struct pci_device_id cp_pci_tbl[] = {
407 { PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139,
408 PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
409 { PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322,
410 PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
413 MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
416 const char str[ETH_GSTRING_LEN];
417 } ethtool_stats_keys[] = {
436 static void cp_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
438 struct cp_private *cp = netdev_priv(dev);
441 spin_lock_irqsave(&cp->lock, flags);
443 cp->cpcmd |= RxVlanOn;
444 cpw16(CpCmd, cp->cpcmd);
445 spin_unlock_irqrestore(&cp->lock, flags);
448 static void cp_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
450 struct cp_private *cp = netdev_priv(dev);
453 spin_lock_irqsave(&cp->lock, flags);
454 cp->cpcmd &= ~RxVlanOn;
455 cpw16(CpCmd, cp->cpcmd);
456 if (cp->vlgrp)
457 cp->vlgrp->vlan_devices[vid] = NULL;
458 spin_unlock_irqrestore(&cp->lock, flags);
460 #endif /* CP_VLAN_TAG_USED */
462 static inline void cp_set_rxbufsize (struct cp_private *cp)
464 unsigned int mtu = cp->dev->mtu;
466 if (mtu > ETH_DATA_LEN)
467 /* MTU + ethernet header + FCS + optional VLAN tag */
468 cp->rx_buf_sz = mtu + ETH_HLEN + 8;
470 cp->rx_buf_sz = PKT_BUF_SZ;
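/* Example of the sizing above, assuming the usual ETH_DATA_LEN of 1500 and
 * ETH_HLEN of 14: a standard 1500-byte MTU keeps the fixed 1536-byte
 * PKT_BUF_SZ buffers, while e.g. an MTU of 4000 yields 4000 + 14 + 8 = 4022
 * bytes per receive buffer (payload plus header, FCS and optional VLAN tag).
 */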
473 static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
474 struct cp_desc *desc)
476 skb->protocol = eth_type_trans (skb, cp->dev);
478 cp->net_stats.rx_packets++;
479 cp->net_stats.rx_bytes += skb->len;
480 cp->dev->last_rx = jiffies;
483 if (cp->vlgrp && (desc->opts2 & RxVlanTagged)) {
484 vlan_hwaccel_receive_skb(skb, cp->vlgrp,
485 be16_to_cpu(desc->opts2 & 0xffff));
488 netif_receive_skb(skb);
491 static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
494 if (netif_msg_rx_err (cp))
496 "%s: rx err, slot %d status 0x%x len %d\n",
497 cp->dev->name, rx_tail, status, len);
498 cp->net_stats.rx_errors++;
499 if (status & RxErrFrame)
500 cp->net_stats.rx_frame_errors++;
501 if (status & RxErrCRC)
502 cp->net_stats.rx_crc_errors++;
503 if ((status & RxErrRunt) || (status & RxErrLong))
504 cp->net_stats.rx_length_errors++;
505 if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
506 cp->net_stats.rx_length_errors++;
507 if (status & RxErrFIFO)
508 cp->net_stats.rx_fifo_errors++;
511 static inline unsigned int cp_rx_csum_ok (u32 status)
513 unsigned int protocol = (status >> 16) & 0x3;
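/* The two bits extracted here are the PID1/PID0 flags defined above:
 * 0 == non-IP, 1 == UDP/IP, 2 == TCP/IP, 3 == IP.  The comparisons below
 * therefore only trust the checksum-fail flag that corresponds to the
 * protocol the chip itself identified for this frame.
 */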
515 if (likely((protocol == RxProtoTCP) && (!(status & TCPFail))))
517 else if ((protocol == RxProtoUDP) && (!(status & UDPFail)))
519 else if ((protocol == RxProtoIP) && (!(status & IPFail)))
524 static int cp_rx_poll (struct net_device *dev, int *budget)
526 struct cp_private *cp = netdev_priv(dev);
527 unsigned rx_tail = cp->rx_tail;
528 unsigned rx_work = dev->quota;
533 cpw16(IntrStatus, cp_rx_intr_mask);
538 struct sk_buff *skb, *new_skb;
539 struct cp_desc *desc;
542 skb = cp->rx_skb[rx_tail].skb;
546 desc = &cp->rx_ring[rx_tail];
547 status = le32_to_cpu(desc->opts1);
548 if (status & DescOwn)
551 len = (status & 0x1fff) - 4;
552 mapping = cp->rx_skb[rx_tail].mapping;
554 if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
555 /* we don't support incoming fragmented frames.
556 * instead, we attempt to ensure that the
557 * pre-allocated RX skbs are properly sized such
558 * that RX fragments are never encountered
560 cp_rx_err_acct(cp, rx_tail, status, len);
561 cp->net_stats.rx_dropped++;
562 cp->cp_stats.rx_frags++;
566 if (status & (RxError | RxErrFIFO)) {
567 cp_rx_err_acct(cp, rx_tail, status, len);
571 if (netif_msg_rx_status(cp))
572 printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d\n",
573 cp->dev->name, rx_tail, status, len);
575 buflen = cp->rx_buf_sz + RX_OFFSET;
576 new_skb = dev_alloc_skb (buflen);
578 cp->net_stats.rx_dropped++;
582 skb_reserve(new_skb, RX_OFFSET);
583 new_skb->dev = cp->dev;
585 pci_unmap_single(cp->pdev, mapping,
586 buflen, PCI_DMA_FROMDEVICE);
588 /* Handle checksum offloading for incoming packets. */
589 if (cp_rx_csum_ok(status))
590 skb->ip_summed = CHECKSUM_UNNECESSARY;
592 skb->ip_summed = CHECKSUM_NONE;
597 cp->rx_skb[rx_tail].mapping =
598 pci_map_single(cp->pdev, new_skb->tail,
599 buflen, PCI_DMA_FROMDEVICE);
600 cp->rx_skb[rx_tail].skb = new_skb;
602 cp_rx_skb(cp, skb, desc);
606 cp->rx_ring[rx_tail].opts2 = 0;
607 cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
608 if (rx_tail == (CP_RX_RING_SIZE - 1))
609 desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
612 desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
613 rx_tail = NEXT_RX(rx_tail);
619 cp->rx_tail = rx_tail;
624 /* if we did not reach work limit, then we're done with
625 * this round of polling
628 if (cpr16(IntrStatus) & cp_rx_intr_mask)
632 cpw16_f(IntrMask, cp_intr_mask);
633 __netif_rx_complete(dev);
639 return 1; /* not done */
643 cp_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
645 struct net_device *dev = dev_instance;
646 struct cp_private *cp;
649 if (unlikely(dev == NULL))
651 cp = netdev_priv(dev);
653 status = cpr16(IntrStatus);
654 if (!status || (status == 0xFFFF))
657 if (netif_msg_intr(cp))
658 printk(KERN_DEBUG "%s: intr, status %04x cmd %02x cpcmd %04x\n",
659 dev->name, status, cpr8(Cmd), cpr16(CpCmd));
661 cpw16(IntrStatus, status & ~cp_rx_intr_mask);
663 spin_lock(&cp->lock);
665 /* close possible races with dev_close */
666 if (unlikely(!netif_running(dev))) {
668 spin_unlock(&cp->lock);
672 if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
673 if (netif_rx_schedule_prep(dev)) {
674 cpw16_f(IntrMask, cp_norx_intr_mask);
675 __netif_rx_schedule(dev);
678 if (status & (TxOK | TxErr | TxEmpty | SWInt))
680 if (status & LinkChg)
681 mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE);
683 spin_unlock(&cp->lock);
685 if (status & PciErr) {
688 pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
689 pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
690 printk(KERN_ERR "%s: PCI bus error, status=%04x, PCI status=%04x\n",
691 dev->name, status, pci_status);
693 /* TODO: reset hardware */
699 #ifdef CONFIG_NET_POLL_CONTROLLER
701 * Polling receive - used by netconsole and other diagnostic tools
702 * to allow network i/o with interrupts disabled.
704 static void cp_poll_controller(struct net_device *dev)
706 disable_irq(dev->irq);
707 cp_interrupt(dev->irq, dev, NULL);
708 enable_irq(dev->irq);
712 static void cp_tx (struct cp_private *cp)
714 unsigned tx_head = cp->tx_head;
715 unsigned tx_tail = cp->tx_tail;
717 while (tx_tail != tx_head) {
722 status = le32_to_cpu(cp->tx_ring[tx_tail].opts1);
723 if (status & DescOwn)
726 skb = cp->tx_skb[tx_tail].skb;
730 pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping,
731 cp->tx_skb[tx_tail].len, PCI_DMA_TODEVICE);
733 if (status & LastFrag) {
734 if (status & (TxError | TxFIFOUnder)) {
735 if (netif_msg_tx_err(cp))
736 printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
737 cp->dev->name, status);
738 cp->net_stats.tx_errors++;
740 cp->net_stats.tx_window_errors++;
741 if (status & TxMaxCol)
742 cp->net_stats.tx_aborted_errors++;
743 if (status & TxLinkFail)
744 cp->net_stats.tx_carrier_errors++;
745 if (status & TxFIFOUnder)
746 cp->net_stats.tx_fifo_errors++;
748 cp->net_stats.collisions +=
749 ((status >> TxColCntShift) & TxColCntMask);
750 cp->net_stats.tx_packets++;
751 cp->net_stats.tx_bytes += skb->len;
752 if (netif_msg_tx_done(cp))
753 printk(KERN_DEBUG "%s: tx done, slot %d\n", cp->dev->name, tx_tail);
755 dev_kfree_skb_irq(skb);
758 cp->tx_skb[tx_tail].skb = NULL;
760 tx_tail = NEXT_TX(tx_tail);
763 cp->tx_tail = tx_tail;
765 if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
766 netif_wake_queue(cp->dev);
769 static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
771 struct cp_private *cp = netdev_priv(dev);
779 spin_lock_irq(&cp->lock);
781 /* This is a hard error, log it. */
782 if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
783 netif_stop_queue(dev);
784 spin_unlock_irq(&cp->lock);
785 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
791 if (cp->vlgrp && vlan_tx_tag_present(skb))
792 vlan_tag = TxVlanTag | cpu_to_be16(vlan_tx_tag_get(skb));
796 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
797 if (dev->features & NETIF_F_TSO)
798 mss = skb_shinfo(skb)->tso_size;
800 if (skb_shinfo(skb)->nr_frags == 0) {
801 struct cp_desc *txd = &cp->tx_ring[entry];
806 mapping = pci_map_single(cp->pdev, skb->data, len, PCI_DMA_TODEVICE);
807 CP_VLAN_TX_TAG(txd, vlan_tag);
808 txd->addr = cpu_to_le64(mapping);
811 flags = eor | len | DescOwn | FirstFrag | LastFrag;
814 flags |= LargeSend | ((mss & MSSMask) << MSSShift);
815 else if (skb->ip_summed == CHECKSUM_HW) {
816 const struct iphdr *ip = skb->nh.iph;
817 if (ip->protocol == IPPROTO_TCP)
818 flags |= IPCS | TCPCS;
819 else if (ip->protocol == IPPROTO_UDP)
820 flags |= IPCS | UDPCS;
822 WARN_ON(1); /* we need a WARN() */
825 txd->opts1 = cpu_to_le32(flags);
828 cp->tx_skb[entry].skb = skb;
829 cp->tx_skb[entry].mapping = mapping;
830 cp->tx_skb[entry].len = len;
831 entry = NEXT_TX(entry);
834 u32 first_len, first_eor;
835 dma_addr_t first_mapping;
836 int frag, first_entry = entry;
837 const struct iphdr *ip = skb->nh.iph;
839 /* We must give this initial chunk to the device last.
840 * Otherwise we could race with the device.
843 first_len = skb_headlen(skb);
844 first_mapping = pci_map_single(cp->pdev, skb->data,
845 first_len, PCI_DMA_TODEVICE);
846 cp->tx_skb[entry].skb = skb;
847 cp->tx_skb[entry].mapping = first_mapping;
848 cp->tx_skb[entry].len = first_len;
849 entry = NEXT_TX(entry);
851 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
852 skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
857 len = this_frag->size;
858 mapping = pci_map_single(cp->pdev,
859 ((void *) page_address(this_frag->page) +
860 this_frag->page_offset),
861 len, PCI_DMA_TODEVICE);
862 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
864 ctrl = eor | len | DescOwn;
868 ((mss & MSSMask) << MSSShift);
869 else if (skb->ip_summed == CHECKSUM_HW) {
870 if (ip->protocol == IPPROTO_TCP)
871 ctrl |= IPCS | TCPCS;
872 else if (ip->protocol == IPPROTO_UDP)
873 ctrl |= IPCS | UDPCS;
878 if (frag == skb_shinfo(skb)->nr_frags - 1)
881 txd = &cp->tx_ring[entry];
882 CP_VLAN_TX_TAG(txd, vlan_tag);
883 txd->addr = cpu_to_le64(mapping);
886 txd->opts1 = cpu_to_le32(ctrl);
889 cp->tx_skb[entry].skb = skb;
890 cp->tx_skb[entry].mapping = mapping;
891 cp->tx_skb[entry].len = len;
892 entry = NEXT_TX(entry);
895 txd = &cp->tx_ring[first_entry];
896 CP_VLAN_TX_TAG(txd, vlan_tag);
897 txd->addr = cpu_to_le64(first_mapping);
900 if (skb->ip_summed == CHECKSUM_HW) {
901 if (ip->protocol == IPPROTO_TCP)
902 txd->opts1 = cpu_to_le32(first_eor | first_len |
903 FirstFrag | DescOwn |
905 else if (ip->protocol == IPPROTO_UDP)
906 txd->opts1 = cpu_to_le32(first_eor | first_len |
907 FirstFrag | DescOwn |
912 txd->opts1 = cpu_to_le32(first_eor | first_len |
913 FirstFrag | DescOwn);
917 if (netif_msg_tx_queued(cp))
918 printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
919 dev->name, entry, skb->len);
920 if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
921 netif_stop_queue(dev);
923 spin_unlock_irq(&cp->lock);
925 cpw8(TxPoll, NormalTxPoll);
926 dev->trans_start = jiffies;
931 /* Set or clear the multicast filter for this adaptor.
932 This routine is not state sensitive and need not be SMP locked. */
934 static void __cp_set_rx_mode (struct net_device *dev)
936 struct cp_private *cp = netdev_priv(dev);
937 u32 mc_filter[2]; /* Multicast hash filter */
941 /* Note: do not reorder, GCC is clever about common statements. */
942 if (dev->flags & IFF_PROMISC) {
943 /* Unconditionally log net taps. */
944 printk (KERN_NOTICE "%s: Promiscuous mode enabled.\n",
947 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
949 mc_filter[1] = mc_filter[0] = 0xffffffff;
950 } else if ((dev->mc_count > multicast_filter_limit)
951 || (dev->flags & IFF_ALLMULTI)) {
952 /* Too many to filter perfectly -- accept all multicasts. */
953 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
954 mc_filter[1] = mc_filter[0] = 0xffffffff;
956 struct dev_mc_list *mclist;
957 rx_mode = AcceptBroadcast | AcceptMyPhys;
958 mc_filter[1] = mc_filter[0] = 0;
959 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
960 i++, mclist = mclist->next) {
961 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
963 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
964 rx_mode |= AcceptMulticast;
968 /* We can safely update without stopping the chip. */
969 tmp = cp_rx_config | rx_mode;
970 if (cp->rx_config != tmp) {
971 cpw32_f (RxConfig, tmp);
974 cpw32_f (MAR0 + 0, mc_filter[0]);
975 cpw32_f (MAR0 + 4, mc_filter[1]);
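/* Summary of the imperfect-filter path above: the top six bits of the
 * Ethernet CRC of each address select one of the 64 hash-table bits, the
 * high bit choosing between mc_filter[0] (MAR0..3) and mc_filter[1]
 * (MAR4..7) and the low five bits the position within that word.  For
 * instance, bit_nr 37 sets bit 5 of mc_filter[1].  A hash hit only means
 * the address may be wanted; exact filtering is left to the stack.
 */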
978 static void cp_set_rx_mode (struct net_device *dev)
981 struct cp_private *cp = netdev_priv(dev);
983 spin_lock_irqsave (&cp->lock, flags);
984 __cp_set_rx_mode(dev);
985 spin_unlock_irqrestore (&cp->lock, flags);
988 static void __cp_get_stats(struct cp_private *cp)
990 /* only lower 24 bits valid; write any value to clear */
991 cp->net_stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
995 static struct net_device_stats *cp_get_stats(struct net_device *dev)
997 struct cp_private *cp = netdev_priv(dev);
1000 /* The chip only needs to report the frames it silently dropped. */
1001 spin_lock_irqsave(&cp->lock, flags);
1002 if (netif_running(dev) && netif_device_present(dev))
1004 spin_unlock_irqrestore(&cp->lock, flags);
1006 return &cp->net_stats;
1009 static void cp_stop_hw (struct cp_private *cp)
1011 cpw16(IntrStatus, ~(cpr16(IntrStatus)));
1012 cpw16_f(IntrMask, 0);
1015 cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));
1018 cp->tx_head = cp->tx_tail = 0;
1021 static void cp_reset_hw (struct cp_private *cp)
1023 unsigned work = 1000;
1025 cpw8(Cmd, CmdReset);
1028 if (!(cpr8(Cmd) & CmdReset))
1031 set_current_state(TASK_UNINTERRUPTIBLE);
1032 schedule_timeout(10);
1035 printk(KERN_ERR "%s: hardware reset timeout\n", cp->dev->name);
1038 static inline void cp_start_hw (struct cp_private *cp)
1040 cpw16(CpCmd, cp->cpcmd);
1041 cpw8(Cmd, RxOn | TxOn);
1044 static void cp_init_hw (struct cp_private *cp)
1046 struct net_device *dev = cp->dev;
1047 dma_addr_t ring_dma;
1051 cpw8_f (Cfg9346, Cfg9346_Unlock);
1053 /* Restore our idea of the MAC address. */
1054 cpw32_f (MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0)));
1055 cpw32_f (MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4)));
1058 cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
1060 __cp_set_rx_mode(dev);
1061 cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));
1063 cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
1064 /* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
1065 cpw8(Config3, PARMEnable);
1066 cp->wol_enabled = 0;
1068 cpw8(Config5, cpr8(Config5) & PMEStatus);
1070 cpw32_f(HiTxRingAddr, 0);
1071 cpw32_f(HiTxRingAddr + 4, 0);
1073 ring_dma = cp->ring_dma;
1074 cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
1075 cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
1077 ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
1078 cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
1079 cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
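/* The ring base addresses are 64-bit registers written as two 32-bit
 * halves.  The odd-looking "(ring_dma >> 16) >> 16" is presumably split
 * into two shifts so the expression stays well-defined when dma_addr_t is
 * only 32 bits wide (a single ">> 32" would be undefined there); in that
 * case the upper half simply ends up as zero.
 */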
1081 cpw16(MultiIntr, 0);
1083 cpw16_f(IntrMask, cp_intr_mask);
1085 cpw8_f(Cfg9346, Cfg9346_Lock);
1088 static int cp_refill_rx (struct cp_private *cp)
1092 for (i = 0; i < CP_RX_RING_SIZE; i++) {
1093 struct sk_buff *skb;
1095 skb = dev_alloc_skb(cp->rx_buf_sz + RX_OFFSET);
1100 skb_reserve(skb, RX_OFFSET);
1102 cp->rx_skb[i].mapping = pci_map_single(cp->pdev,
1103 skb->tail, cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1104 cp->rx_skb[i].skb = skb;
1106 cp->rx_ring[i].opts2 = 0;
1107 cp->rx_ring[i].addr = cpu_to_le64(cp->rx_skb[i].mapping);
1108 if (i == (CP_RX_RING_SIZE - 1))
1109 cp->rx_ring[i].opts1 =
1110 cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
1112 cp->rx_ring[i].opts1 =
1113 cpu_to_le32(DescOwn | cp->rx_buf_sz);
1123 static int cp_init_rings (struct cp_private *cp)
1125 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1126 cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
1129 cp->tx_head = cp->tx_tail = 0;
1131 return cp_refill_rx (cp);
1134 static int cp_alloc_rings (struct cp_private *cp)
1138 mem = pci_alloc_consistent(cp->pdev, CP_RING_BYTES, &cp->ring_dma);
1143 cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
1145 mem += (CP_RING_BYTES - CP_STATS_SIZE);
1146 cp->nic_stats = mem;
1147 cp->nic_stats_dma = cp->ring_dma + (CP_RING_BYTES - CP_STATS_SIZE);
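/* Layout of the single coherent allocation made above (CP_RING_BYTES): the
 * Rx descriptor ring comes first, the Tx ring follows immediately, and the
 * final CP_STATS_SIZE (64) bytes hold the hardware statistics block whose
 * bus address (nic_stats_dma) is later handed to the chip via StatsAddr
 * when a statistics dump is requested.
 */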
1149 return cp_init_rings(cp);
1152 static void cp_clean_rings (struct cp_private *cp)
1156 for (i = 0; i < CP_RX_RING_SIZE; i++) {
1157 if (cp->rx_skb[i].skb) {
1158 pci_unmap_single(cp->pdev, cp->rx_skb[i].mapping,
1159 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1160 dev_kfree_skb(cp->rx_skb[i].skb);
1164 for (i = 0; i < CP_TX_RING_SIZE; i++) {
1165 if (cp->tx_skb[i].skb) {
1166 struct sk_buff *skb = cp->tx_skb[i].skb;
1168 pci_unmap_single(cp->pdev, cp->tx_skb[i].mapping,
1169 cp->tx_skb[i].len, PCI_DMA_TODEVICE);
1170 if (le32_to_cpu(cp->tx_ring[i].opts1) & LastFrag)
1172 cp->net_stats.tx_dropped++;
1176 memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
1177 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1179 memset(&cp->rx_skb, 0, sizeof(struct ring_info) * CP_RX_RING_SIZE);
1180 memset(&cp->tx_skb, 0, sizeof(struct ring_info) * CP_TX_RING_SIZE);
1183 static void cp_free_rings (struct cp_private *cp)
1186 pci_free_consistent(cp->pdev, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
1189 cp->nic_stats = NULL;
1192 static int cp_open (struct net_device *dev)
1194 struct cp_private *cp = netdev_priv(dev);
1197 if (netif_msg_ifup(cp))
1198 printk(KERN_DEBUG "%s: enabling interface\n", dev->name);
1200 rc = cp_alloc_rings(cp);
1206 rc = request_irq(dev->irq, cp_interrupt, SA_SHIRQ, dev->name, dev);
1210 netif_carrier_off(dev);
1211 mii_check_media(&cp->mii_if, netif_msg_link(cp), TRUE);
1212 netif_start_queue(dev);
1222 static int cp_close (struct net_device *dev)
1224 struct cp_private *cp = netdev_priv(dev);
1225 unsigned long flags;
1227 if (netif_msg_ifdown(cp))
1228 printk(KERN_DEBUG "%s: disabling interface\n", dev->name);
1230 spin_lock_irqsave(&cp->lock, flags);
1232 netif_stop_queue(dev);
1233 netif_carrier_off(dev);
1237 spin_unlock_irqrestore(&cp->lock, flags);
1239 synchronize_irq(dev->irq);
1240 free_irq(dev->irq, dev);
1247 static int cp_change_mtu(struct net_device *dev, int new_mtu)
1249 struct cp_private *cp = netdev_priv(dev);
1251 unsigned long flags;
1253 /* check for invalid MTU, according to hardware limits */
1254 if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
1257 /* if network interface not up, no need for complexity */
1258 if (!netif_running(dev)) {
1260 cp_set_rxbufsize(cp); /* set new rx buf size */
1264 spin_lock_irqsave(&cp->lock, flags);
1266 cp_stop_hw(cp); /* stop h/w and free rings */
1270 cp_set_rxbufsize(cp); /* set new rx buf size */
1272 rc = cp_init_rings(cp); /* realloc and restart h/w */
1275 spin_unlock_irqrestore(&cp->lock, flags);
1281 static char mii_2_8139_map[8] = {
1292 static int mdio_read(struct net_device *dev, int phy_id, int location)
1294 struct cp_private *cp = netdev_priv(dev);
1296 return location < 8 && mii_2_8139_map[location] ?
1297 readw(cp->regs + mii_2_8139_map[location]) : 0;
1301 static void mdio_write(struct net_device *dev, int phy_id, int location,
1304 struct cp_private *cp = netdev_priv(dev);
1306 if (location == 0) {
1307 cpw8(Cfg9346, Cfg9346_Unlock);
1308 cpw16(BasicModeCtrl, value);
1309 cpw8(Cfg9346, Cfg9346_Lock);
1310 } else if (location < 8 && mii_2_8139_map[location])
1311 cpw16(mii_2_8139_map[location], value);
1314 /* Set the ethtool Wake-on-LAN settings */
1315 static int netdev_set_wol (struct cp_private *cp,
1316 const struct ethtool_wolinfo *wol)
1320 options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
1321 /* If WOL is being disabled, no need for complexity */
1323 if (wol->wolopts & WAKE_PHY) options |= LinkUp;
1324 if (wol->wolopts & WAKE_MAGIC) options |= MagicPacket;
1327 cpw8 (Cfg9346, Cfg9346_Unlock);
1328 cpw8 (Config3, options);
1329 cpw8 (Cfg9346, Cfg9346_Lock);
1331 options = 0; /* Paranoia setting */
1332 options = cpr8 (Config5) & ~(UWF | MWF | BWF);
1333 /* If WOL is being disabled, no need for complexity */
1335 if (wol->wolopts & WAKE_UCAST) options |= UWF;
1336 if (wol->wolopts & WAKE_BCAST) options |= BWF;
1337 if (wol->wolopts & WAKE_MCAST) options |= MWF;
1340 cpw8 (Config5, options);
1342 cp->wol_enabled = (wol->wolopts) ? 1 : 0;
1347 /* Get the ethtool Wake-on-LAN settings */
1348 static void netdev_get_wol (struct cp_private *cp,
1349 struct ethtool_wolinfo *wol)
1353 wol->wolopts = 0; /* Start from scratch */
1354 wol->supported = WAKE_PHY | WAKE_BCAST | WAKE_MAGIC |
1355 WAKE_MCAST | WAKE_UCAST;
1356 /* We don't need to go on if WOL is disabled */
1357 if (!cp->wol_enabled) return;
1359 options = cpr8 (Config3);
1360 if (options & LinkUp) wol->wolopts |= WAKE_PHY;
1361 if (options & MagicPacket) wol->wolopts |= WAKE_MAGIC;
1363 options = 0; /* Paranoia setting */
1364 options = cpr8 (Config5);
1365 if (options & UWF) wol->wolopts |= WAKE_UCAST;
1366 if (options & BWF) wol->wolopts |= WAKE_BCAST;
1367 if (options & MWF) wol->wolopts |= WAKE_MCAST;
1370 static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1372 struct cp_private *cp = netdev_priv(dev);
1374 strcpy (info->driver, DRV_NAME);
1375 strcpy (info->version, DRV_VERSION);
1376 strcpy (info->bus_info, pci_name(cp->pdev));
1379 static int cp_get_regs_len(struct net_device *dev)
1381 return CP_REGS_SIZE;
1384 static int cp_get_stats_count (struct net_device *dev)
1386 return CP_NUM_STATS;
1389 static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1391 struct cp_private *cp = netdev_priv(dev);
1393 unsigned long flags;
1395 spin_lock_irqsave(&cp->lock, flags);
1396 rc = mii_ethtool_gset(&cp->mii_if, cmd);
1397 spin_unlock_irqrestore(&cp->lock, flags);
1402 static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1404 struct cp_private *cp = netdev_priv(dev);
1406 unsigned long flags;
1408 spin_lock_irqsave(&cp->lock, flags);
1409 rc = mii_ethtool_sset(&cp->mii_if, cmd);
1410 spin_unlock_irqrestore(&cp->lock, flags);
1415 static int cp_nway_reset(struct net_device *dev)
1417 struct cp_private *cp = netdev_priv(dev);
1418 return mii_nway_restart(&cp->mii_if);
1421 static u32 cp_get_msglevel(struct net_device *dev)
1423 struct cp_private *cp = netdev_priv(dev);
1424 return cp->msg_enable;
1427 static void cp_set_msglevel(struct net_device *dev, u32 value)
1429 struct cp_private *cp = netdev_priv(dev);
1430 cp->msg_enable = value;
1433 static u32 cp_get_rx_csum(struct net_device *dev)
1435 struct cp_private *cp = netdev_priv(dev);
1436 return (cpr16(CpCmd) & RxChkSum) ? 1 : 0;
1439 static int cp_set_rx_csum(struct net_device *dev, u32 data)
1441 struct cp_private *cp = netdev_priv(dev);
1442 u16 cmd = cp->cpcmd, newcmd;
1449 newcmd &= ~RxChkSum;
1451 if (newcmd != cmd) {
1452 unsigned long flags;
1454 spin_lock_irqsave(&cp->lock, flags);
1456 cpw16_f(CpCmd, newcmd);
1457 spin_unlock_irqrestore(&cp->lock, flags);
1463 static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1466 struct cp_private *cp = netdev_priv(dev);
1467 unsigned long flags;
1469 if (regs->len < CP_REGS_SIZE)
1470 return /* -EINVAL */;
1472 regs->version = CP_REGS_VER;
1474 spin_lock_irqsave(&cp->lock, flags);
1475 memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
1476 spin_unlock_irqrestore(&cp->lock, flags);
1479 static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1481 struct cp_private *cp = netdev_priv(dev);
1482 unsigned long flags;
1484 spin_lock_irqsave (&cp->lock, flags);
1485 netdev_get_wol (cp, wol);
1486 spin_unlock_irqrestore (&cp->lock, flags);
1489 static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1491 struct cp_private *cp = netdev_priv(dev);
1492 unsigned long flags;
1495 spin_lock_irqsave (&cp->lock, flags);
1496 rc = netdev_set_wol (cp, wol);
1497 spin_unlock_irqrestore (&cp->lock, flags);
1502 static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
1504 switch (stringset) {
1506 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
1514 static void cp_get_ethtool_stats (struct net_device *dev,
1515 struct ethtool_stats *estats, u64 *tmp_stats)
1517 struct cp_private *cp = netdev_priv(dev);
1518 unsigned int work = 100;
1521 /* begin NIC statistics dump */
1522 cpw32(StatsAddr + 4, (cp->nic_stats_dma >> 16) >> 16);
1523 cpw32(StatsAddr, (cp->nic_stats_dma & 0xffffffff) | DumpStats);
1526 while (work-- > 0) {
1527 if ((cpr32(StatsAddr) & DumpStats) == 0)
1532 if (cpr32(StatsAddr) & DumpStats)
1536 tmp_stats[i++] = le64_to_cpu(cp->nic_stats->tx_ok);
1537 tmp_stats[i++] = le64_to_cpu(cp->nic_stats->rx_ok);
1538 tmp_stats[i++] = le64_to_cpu(cp->nic_stats->tx_err);
1539 tmp_stats[i++] = le32_to_cpu(cp->nic_stats->rx_err);
1540 tmp_stats[i++] = le16_to_cpu(cp->nic_stats->rx_fifo);
1541 tmp_stats[i++] = le16_to_cpu(cp->nic_stats->frame_align);
1542 tmp_stats[i++] = le32_to_cpu(cp->nic_stats->tx_ok_1col);
1543 tmp_stats[i++] = le32_to_cpu(cp->nic_stats->tx_ok_mcol);
1544 tmp_stats[i++] = le64_to_cpu(cp->nic_stats->rx_ok_phys);
1545 tmp_stats[i++] = le64_to_cpu(cp->nic_stats->rx_ok_bcast);
1546 tmp_stats[i++] = le32_to_cpu(cp->nic_stats->rx_ok_mcast);
1547 tmp_stats[i++] = le16_to_cpu(cp->nic_stats->tx_abort);
1548 tmp_stats[i++] = le16_to_cpu(cp->nic_stats->tx_underrun);
1549 tmp_stats[i++] = cp->cp_stats.rx_frags;
1550 if (i != CP_NUM_STATS)
1554 static struct ethtool_ops cp_ethtool_ops = {
1555 .get_drvinfo = cp_get_drvinfo,
1556 .get_regs_len = cp_get_regs_len,
1557 .get_stats_count = cp_get_stats_count,
1558 .get_settings = cp_get_settings,
1559 .set_settings = cp_set_settings,
1560 .nway_reset = cp_nway_reset,
1561 .get_link = ethtool_op_get_link,
1562 .get_msglevel = cp_get_msglevel,
1563 .set_msglevel = cp_set_msglevel,
1564 .get_rx_csum = cp_get_rx_csum,
1565 .set_rx_csum = cp_set_rx_csum,
1566 .get_tx_csum = ethtool_op_get_tx_csum,
1567 .set_tx_csum = ethtool_op_set_tx_csum, /* local! */
1568 .get_sg = ethtool_op_get_sg,
1569 .set_sg = ethtool_op_set_sg,
1570 .get_tso = ethtool_op_get_tso,
1571 .set_tso = ethtool_op_set_tso,
1572 .get_regs = cp_get_regs,
1573 .get_wol = cp_get_wol,
1574 .set_wol = cp_set_wol,
1575 .get_strings = cp_get_strings,
1576 .get_ethtool_stats = cp_get_ethtool_stats,
1579 static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1581 struct cp_private *cp = netdev_priv(dev);
1583 unsigned long flags;
1585 if (!netif_running(dev))
1588 spin_lock_irqsave(&cp->lock, flags);
1589 rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
1590 spin_unlock_irqrestore(&cp->lock, flags);
1594 /* Serial EEPROM section. */
1596 /* EEPROM_Ctrl bits. */
1597 #define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
1598 #define EE_CS 0x08 /* EEPROM chip select. */
1599 #define EE_DATA_WRITE 0x02 /* EEPROM chip data in. */
1600 #define EE_WRITE_0 0x00
1601 #define EE_WRITE_1 0x02
1602 #define EE_DATA_READ 0x01 /* EEPROM chip data out. */
1603 #define EE_ENB (0x80 | EE_CS)
1605 /* Delay between EEPROM clock transitions.
1606 No extra delay is needed with 33 MHz PCI, but 66 MHz may change this.
1609 #define eeprom_delay() readl(ee_addr)
1611 /* The EEPROM commands include the always-set leading bit. */
1612 #define EE_WRITE_CMD (5)
1613 #define EE_READ_CMD (6)
1614 #define EE_ERASE_CMD (7)
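/* Sketch of what read_eeprom() below clocks out for a read, under the usual
 * 93C46/93C66-style framing this code assumes: the opcode (EE_READ_CMD,
 * whose top bit is the always-set start bit) sits above the address bits,
 * i.e. read_cmd = location | (EE_READ_CMD << addr_len).  That command is
 * shifted out MSB-first on EE_DATA_WRITE while toggling EE_SHIFT_CLK, and
 * the 16 data bits are then clocked back in on EE_DATA_READ.
 */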
1616 static int read_eeprom (void __iomem *ioaddr, int location, int addr_len)
1619 unsigned retval = 0;
1620 void __iomem *ee_addr = ioaddr + Cfg9346;
1621 int read_cmd = location | (EE_READ_CMD << addr_len);
1623 writeb (EE_ENB & ~EE_CS, ee_addr);
1624 writeb (EE_ENB, ee_addr);
1627 /* Shift the read command bits out. */
1628 for (i = 4 + addr_len; i >= 0; i--) {
1629 int dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1630 writeb (EE_ENB | dataval, ee_addr);
1632 writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
1635 writeb (EE_ENB, ee_addr);
1638 for (i = 16; i > 0; i--) {
1639 writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
1642 (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
1644 writeb (EE_ENB, ee_addr);
1648 /* Terminate the EEPROM access. */
1649 writeb (~EE_CS, ee_addr);
1655 /* Put the board into a D3 low-power state and wait for the WakeUp signal */
1656 static void cp_set_d3_state (struct cp_private *cp)
1658 pci_enable_wake (cp->pdev, 0, 1); /* Enable PME# generation */
1659 pci_set_power_state (cp->pdev, PCI_D3hot);
1662 static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1664 struct net_device *dev;
1665 struct cp_private *cp;
1669 unsigned int addr_len, i, pci_using_dac;
1673 static int version_printed;
1674 if (version_printed++ == 0)
1675 printk("%s", version);
1678 pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);
1680 if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
1681 pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pci_rev < 0x20) {
1682 printk(KERN_ERR PFX "pci dev %s (id %04x:%04x rev %02x) is not an 8139C+ compatible chip\n",
1683 pci_name(pdev), pdev->vendor, pdev->device, pci_rev);
1684 printk(KERN_ERR PFX "Try the \"8139too\" driver instead.\n");
1688 dev = alloc_etherdev(sizeof(struct cp_private));
1691 SET_MODULE_OWNER(dev);
1692 SET_NETDEV_DEV(dev, &pdev->dev);
1694 cp = netdev_priv(dev);
1697 cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
1698 spin_lock_init (&cp->lock);
1699 cp->mii_if.dev = dev;
1700 cp->mii_if.mdio_read = mdio_read;
1701 cp->mii_if.mdio_write = mdio_write;
1702 cp->mii_if.phy_id = CP_INTERNAL_PHY;
1703 cp->mii_if.phy_id_mask = 0x1f;
1704 cp->mii_if.reg_num_mask = 0x1f;
1705 cp_set_rxbufsize(cp);
1707 rc = pci_enable_device(pdev);
1711 rc = pci_set_mwi(pdev);
1713 goto err_out_disable;
1715 rc = pci_request_regions(pdev, DRV_NAME);
1719 pciaddr = pci_resource_start(pdev, 1);
1722 printk(KERN_ERR PFX "no MMIO resource for pci dev %s\n",
1726 if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
1728 printk(KERN_ERR PFX "MMIO resource (%lx) too small on pci dev %s\n",
1729 pci_resource_len(pdev, 1), pci_name(pdev));
1733 /* Configure DMA attributes. */
1734 if ((sizeof(dma_addr_t) > 4) &&
1735 !pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL) &&
1736 !pci_set_dma_mask(pdev, 0xffffffffffffffffULL)) {
1741 rc = pci_set_dma_mask(pdev, 0xffffffffULL);
1743 printk(KERN_ERR PFX "No usable DMA configuration, "
1747 rc = pci_set_consistent_dma_mask(pdev, 0xffffffffULL);
1749 printk(KERN_ERR PFX "No usable consistent DMA configuration, "
1755 cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
1756 PCIMulRW | RxChkSum | CpRxOn | CpTxOn;
1758 regs = ioremap(pciaddr, CP_REGS_SIZE);
1761 printk(KERN_ERR PFX "Cannot map PCI MMIO (%lx@%lx) on pci dev %s\n",
1762 pci_resource_len(pdev, 1), pciaddr, pci_name(pdev));
1765 dev->base_addr = (unsigned long) regs;
1770 /* read MAC address from EEPROM */
1771 addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
1772 for (i = 0; i < 3; i++)
1773 ((u16 *) (dev->dev_addr))[i] =
1774 le16_to_cpu (read_eeprom (regs, i + 7, addr_len));
1776 dev->open = cp_open;
1777 dev->stop = cp_close;
1778 dev->set_multicast_list = cp_set_rx_mode;
1779 dev->hard_start_xmit = cp_start_xmit;
1780 dev->get_stats = cp_get_stats;
1781 dev->do_ioctl = cp_ioctl;
1782 dev->poll = cp_rx_poll;
1783 #ifdef CONFIG_NET_POLL_CONTROLLER
1784 dev->poll_controller = cp_poll_controller;
1786 dev->weight = 16; /* arbitrary? from NAPI_HOWTO.txt. */
1788 dev->change_mtu = cp_change_mtu;
1790 dev->ethtool_ops = &cp_ethtool_ops;
1792 dev->tx_timeout = cp_tx_timeout;
1793 dev->watchdog_timeo = TX_TIMEOUT;
1796 #if CP_VLAN_TAG_USED
1797 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1798 dev->vlan_rx_register = cp_vlan_rx_register;
1799 dev->vlan_rx_kill_vid = cp_vlan_rx_kill_vid;
1803 dev->features |= NETIF_F_HIGHDMA;
1805 #if 0 /* disabled by default until verified */
1806 dev->features |= NETIF_F_TSO;
1809 dev->irq = pdev->irq;
1811 rc = register_netdev(dev);
1815 printk (KERN_INFO "%s: RTL-8139C+ at 0x%lx, "
1816 "%02x:%02x:%02x:%02x:%02x:%02x, "
1820 dev->dev_addr[0], dev->dev_addr[1],
1821 dev->dev_addr[2], dev->dev_addr[3],
1822 dev->dev_addr[4], dev->dev_addr[5],
1825 pci_set_drvdata(pdev, dev);
1827 /* enable busmastering and memory-write-invalidate */
1828 pci_set_master(pdev);
1830 if (cp->wol_enabled) cp_set_d3_state (cp);
1837 pci_release_regions(pdev);
1839 pci_clear_mwi(pdev);
1841 pci_disable_device(pdev);
1847 static void cp_remove_one (struct pci_dev *pdev)
1849 struct net_device *dev = pci_get_drvdata(pdev);
1850 struct cp_private *cp = netdev_priv(dev);
1854 unregister_netdev(dev);
1856 if (cp->wol_enabled) pci_set_power_state (pdev, PCI_D0);
1857 pci_release_regions(pdev);
1858 pci_clear_mwi(pdev);
1859 pci_disable_device(pdev);
1860 pci_set_drvdata(pdev, NULL);
1865 static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
1867 struct net_device *dev;
1868 struct cp_private *cp;
1869 unsigned long flags;
1871 dev = pci_get_drvdata (pdev);
1872 cp = netdev_priv(dev);
1874 if (!dev || !netif_running (dev)) return 0;
1876 netif_device_detach (dev);
1877 netif_stop_queue (dev);
1879 spin_lock_irqsave (&cp->lock, flags);
1881 /* Disable Rx and Tx */
1882 cpw16 (IntrMask, 0);
1883 cpw8 (Cmd, cpr8 (Cmd) & ~(RxOn | TxOn));
1885 spin_unlock_irqrestore (&cp->lock, flags);
1887 if (cp->pdev && cp->wol_enabled) {
1888 pci_save_state (cp->pdev);
1889 cp_set_d3_state (cp);
1895 static int cp_resume (struct pci_dev *pdev)
1897 struct net_device *dev;
1898 struct cp_private *cp;
1900 dev = pci_get_drvdata (pdev);
1901 cp = netdev_priv(dev);
1903 netif_device_attach (dev);
1905 if (cp->pdev && cp->wol_enabled) {
1906 pci_set_power_state (cp->pdev, PCI_D0);
1907 pci_restore_state (cp->pdev);
1911 netif_start_queue (dev);
1915 #endif /* CONFIG_PM */
1917 static struct pci_driver cp_driver = {
1919 .id_table = cp_pci_tbl,
1920 .probe = cp_init_one,
1921 .remove = cp_remove_one,
1923 .resume = cp_resume,
1924 .suspend = cp_suspend,
1928 static int __init cp_init (void)
1931 printk("%s", version);
1933 return pci_module_init (&cp_driver);
1936 static void __exit cp_exit (void)
1938 pci_unregister_driver (&cp_driver);
1941 module_init(cp_init);
1942 module_exit(cp_exit);