1 /* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
3 Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com>
5 Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c]
6 Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
7 Copyright 2001 Manfred Spraul [natsemi.c]
8 Copyright 1999-2001 by Donald Becker. [natsemi.c]
9 Written 1997-2001 by Donald Becker. [8139too.c]
10 Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]
12 This software may be used and distributed according to the terms of
13 the GNU General Public License (GPL), incorporated herein by reference.
14 Drivers based on or derived from this code fall under the GPL and must
15 retain the authorship, copyright and license notice. This file is not
16 a complete program and may only be used when the entire operating
17 system is licensed under the GPL.
19 See the file COPYING in this distribution for more information.
23 Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
24 PCI suspend/resume - Felipe Damasio <felipewd@terra.com.br>
25 LinkChg interrupt - Felipe Damasio <felipewd@terra.com.br>
28 * Test Tx checksumming thoroughly
31 * Complete reset on PciErr
32 * Consider Rx interrupt mitigation using TimerIntr
33 * Investigate using skb->priority with h/w VLAN priority
34 * Investigate using High Priority Tx Queue with skb->priority
35 * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
36 * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
37 * Implement Tx software interrupt mitigation via
39 * The real minimum of CP_MIN_MTU is 4 bytes. However,
40 for this to be supported, one must(?) turn on packet padding.
41 * Support external MII transceivers (patch available)
44 * TX checksumming is considered experimental. It is off by
45 default; use ethtool to turn it on.
49 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
51 #define DRV_NAME "8139cp"
52 #define DRV_VERSION "1.3"
53 #define DRV_RELDATE "Mar 22, 2004"
56 #include <linux/module.h>
57 #include <linux/moduleparam.h>
58 #include <linux/kernel.h>
59 #include <linux/compiler.h>
60 #include <linux/netdevice.h>
61 #include <linux/etherdevice.h>
62 #include <linux/init.h>
63 #include <linux/interrupt.h>
64 #include <linux/pci.h>
65 #include <linux/dma-mapping.h>
66 #include <linux/delay.h>
67 #include <linux/ethtool.h>
68 #include <linux/gfp.h>
69 #include <linux/mii.h>
70 #include <linux/if_vlan.h>
71 #include <linux/crc32.h>
74 #include <linux/tcp.h>
75 #include <linux/udp.h>
76 #include <linux/cache.h>
79 #include <asm/uaccess.h>
81 /* These identify the driver base version and may not be removed. */
82 static char version[] =
83 DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
85 MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
86 MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
87 MODULE_VERSION(DRV_VERSION);
88 MODULE_LICENSE("GPL");
90 static int debug = -1;
91 module_param(debug, int, 0);
92 MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");
94 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
95 The RTL chips use a 64-element hash table based on the Ethernet CRC. */
96 static int multicast_filter_limit = 32;
97 module_param(multicast_filter_limit, int, 0);
98 MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
100 #define CP_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
103 #define CP_NUM_STATS 14 /* struct cp_dma_stats, plus one */
104 #define CP_STATS_SIZE 64 /* size in bytes of DMA stats block */
105 #define CP_REGS_SIZE (0xff + 1)
106 #define CP_REGS_VER 1 /* version 1 */
107 #define CP_RX_RING_SIZE 64
108 #define CP_TX_RING_SIZE 64
109 #define CP_RING_BYTES \
110 ((sizeof(struct cp_desc) * CP_RX_RING_SIZE) + \
111 (sizeof(struct cp_desc) * CP_TX_RING_SIZE) + \
113 #define NEXT_TX(N) (((N) + 1) & (CP_TX_RING_SIZE - 1))
114 #define NEXT_RX(N) (((N) + 1) & (CP_RX_RING_SIZE - 1))
115 #define TX_BUFFS_AVAIL(CP) \
116 (((CP)->tx_tail <= (CP)->tx_head) ? \
117 (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head : \
118 (CP)->tx_tail - (CP)->tx_head - 1)
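/*
 * Illustrative sketch, not part of the driver: the ring macros above
 * depend on the ring sizes being powers of two, so "(N + 1) & (SIZE - 1)"
 * wraps an index without a divide.  With head == tail the ring is empty,
 * which is why at most SIZE - 1 slots are usable and TX_BUFFS_AVAIL never
 * returns more than CP_TX_RING_SIZE - 1.
 */
static inline void cp_ring_index_demo(void)
{
	unsigned idx = CP_TX_RING_SIZE - 1;	/* last slot */

	idx = NEXT_TX(idx);			/* 63 -> 0, wrap via mask */
	BUILD_BUG_ON(CP_TX_RING_SIZE & (CP_TX_RING_SIZE - 1));
	(void)idx;
}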
120 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
121 #define CP_INTERNAL_PHY 32
123 /* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */
124 #define RX_FIFO_THRESH 5 /* Rx buffer level before first PCI xfer. */
125 #define RX_DMA_BURST 4 /* Maximum PCI burst, '4' is 256 */
126 #define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
127 #define TX_EARLY_THRESH 256 /* Early Tx threshold, in bytes */
129 /* Time in jiffies before concluding the transmitter is hung. */
130 #define TX_TIMEOUT (6*HZ)
132 /* hardware minimum and maximum for a single frame's data payload */
133 #define CP_MIN_MTU 60 /* TODO: allow lower, but pad */
134 #define CP_MAX_MTU 4096
137 /* NIC register offsets */
138 MAC0 = 0x00, /* Ethernet hardware address. */
139 MAR0 = 0x08, /* Multicast filter. */
140 StatsAddr = 0x10, /* 64-bit start addr of 64-byte DMA stats blk */
141 TxRingAddr = 0x20, /* 64-bit start addr of Tx ring */
142 HiTxRingAddr = 0x28, /* 64-bit start addr of high priority Tx ring */
143 Cmd = 0x37, /* Command register */
144 IntrMask = 0x3C, /* Interrupt mask */
145 IntrStatus = 0x3E, /* Interrupt status */
146 TxConfig = 0x40, /* Tx configuration */
147 ChipVersion = 0x43, /* 8-bit chip version, inside TxConfig */
148 RxConfig = 0x44, /* Rx configuration */
149 RxMissed = 0x4C, /* 24 bits valid, write clears */
150 Cfg9346 = 0x50, /* EEPROM select/control; Cfg reg [un]lock */
151 Config1 = 0x52, /* Config1 */
152 Config3 = 0x59, /* Config3 */
153 Config4 = 0x5A, /* Config4 */
154 MultiIntr = 0x5C, /* Multiple interrupt select */
155 BasicModeCtrl = 0x62, /* MII BMCR */
156 BasicModeStatus = 0x64, /* MII BMSR */
157 NWayAdvert = 0x66, /* MII ADVERTISE */
158 NWayLPAR = 0x68, /* MII LPA */
159 NWayExpansion = 0x6A, /* MII Expansion */
160 Config5 = 0xD8, /* Config5 */
161 TxPoll = 0xD9, /* Tell chip to check Tx descriptors for work */
162 RxMaxSize = 0xDA, /* Max size of an Rx packet (8169 only) */
163 CpCmd = 0xE0, /* C+ Command register (C+ mode only) */
164 IntrMitigate = 0xE2, /* rx/tx interrupt mitigation control */
165 RxRingAddr = 0xE4, /* 64-bit start addr of Rx ring */
166 TxThresh = 0xEC, /* Early Tx threshold */
167 OldRxBufAddr = 0x30, /* DMA address of Rx ring buffer (C mode) */
168 OldTSD0 = 0x10, /* DMA address of first Tx desc (C mode) */
170 /* Tx and Rx status descriptors */
171 DescOwn = (1 << 31), /* Descriptor is owned by NIC */
172 RingEnd = (1 << 30), /* End of descriptor ring */
173 FirstFrag = (1 << 29), /* First segment of a packet */
174 LastFrag = (1 << 28), /* Final segment of a packet */
175 LargeSend = (1 << 27), /* TCP Large Send Offload (TSO) */
176 MSSShift = 16, /* MSS value position */
177 MSSMask = 0xfff, /* MSS value: 11 bits */
178 TxError = (1 << 23), /* Tx error summary */
179 RxError = (1 << 20), /* Rx error summary */
180 IPCS = (1 << 18), /* Calculate IP checksum */
181 UDPCS = (1 << 17), /* Calculate UDP/IP checksum */
182 TCPCS = (1 << 16), /* Calculate TCP/IP checksum */
183 TxVlanTag = (1 << 17), /* Add VLAN tag */
184 RxVlanTagged = (1 << 16), /* Rx VLAN tag available */
185 IPFail = (1 << 15), /* IP checksum failed */
186 UDPFail = (1 << 14), /* UDP/IP checksum failed */
187 TCPFail = (1 << 13), /* TCP/IP checksum failed */
188 NormalTxPoll = (1 << 6), /* One or more normal Tx packets to send */
189 PID1 = (1 << 17), /* 2 protocol id bits: 0==non-IP, */
190 PID0 = (1 << 16), /* 1==UDP/IP, 2==TCP/IP, 3==IP */
194 TxFIFOUnder = (1 << 25), /* Tx FIFO underrun */
195 TxOWC = (1 << 22), /* Tx Out-of-window collision */
196 TxLinkFail = (1 << 21), /* Link failed during Tx of packet */
197 TxMaxCol = (1 << 20), /* Tx aborted due to excessive collisions */
198 TxColCntShift = 16, /* Shift, to get 4-bit Tx collision cnt */
199 TxColCntMask = 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */
200 RxErrFrame = (1 << 27), /* Rx frame alignment error */
201 RxMcast = (1 << 26), /* Rx multicast packet rcv'd */
202 RxErrCRC = (1 << 18), /* Rx CRC error */
203 RxErrRunt = (1 << 19), /* Rx error, packet < 64 bytes */
204 RxErrLong = (1 << 21), /* Rx error, packet > 4096 bytes */
205 RxErrFIFO = (1 << 22), /* Rx error, FIFO overflowed, pkt bad */
207 /* StatsAddr register */
208 DumpStats = (1 << 3), /* Begin stats dump */
210 /* RxConfig register */
211 RxCfgFIFOShift = 13, /* Shift, to get Rx FIFO thresh value */
212 RxCfgDMAShift = 8, /* Shift, to get Rx Max DMA value */
213 AcceptErr = 0x20, /* Accept packets with CRC errors */
214 AcceptRunt = 0x10, /* Accept runt (<64 bytes) packets */
215 AcceptBroadcast = 0x08, /* Accept broadcast packets */
216 AcceptMulticast = 0x04, /* Accept multicast packets */
217 AcceptMyPhys = 0x02, /* Accept pkts with our MAC as dest */
218 AcceptAllPhys = 0x01, /* Accept all pkts w/ physical dest */
220 /* IntrMask / IntrStatus registers */
221 PciErr = (1 << 15), /* System error on the PCI bus */
222 TimerIntr = (1 << 14), /* Asserted when TCTR reaches TimerInt value */
223 LenChg = (1 << 13), /* Cable length change */
224 SWInt = (1 << 8), /* Software-requested interrupt */
225 TxEmpty = (1 << 7), /* No Tx descriptors available */
226 RxFIFOOvr = (1 << 6), /* Rx FIFO Overflow */
227 LinkChg = (1 << 5), /* Packet underrun, or link change */
228 RxEmpty = (1 << 4), /* No Rx descriptors available */
229 TxErr = (1 << 3), /* Tx error */
230 TxOK = (1 << 2), /* Tx packet sent */
231 RxErr = (1 << 1), /* Rx error */
232 RxOK = (1 << 0), /* Rx packet received */
233 IntrResvd = (1 << 10), /* reserved, according to RealTek engineers,
234 but hardware likes to raise it */
236 IntrAll = PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
237 RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
238 RxErr | RxOK | IntrResvd,
240 /* C mode command register */
241 CmdReset = (1 << 4), /* Enable to reset; self-clearing */
242 RxOn = (1 << 3), /* Rx mode enable */
243 TxOn = (1 << 2), /* Tx mode enable */
245 /* C+ mode command register */
246 RxVlanOn = (1 << 6), /* Rx VLAN de-tagging enable */
247 RxChkSum = (1 << 5), /* Rx checksum offload enable */
248 PCIDAC = (1 << 4), /* PCI Dual Address Cycle (64-bit PCI) */
249 PCIMulRW = (1 << 3), /* Enable PCI read/write multiple */
250 CpRxOn = (1 << 1), /* Rx mode enable */
251 CpTxOn = (1 << 0), /* Tx mode enable */
253 /* Cfg9346 EEPROM control register */
254 Cfg9346_Lock = 0x00, /* Lock ConfigX/MII register access */
255 Cfg9346_Unlock = 0xC0, /* Unlock ConfigX/MII register access */
257 /* TxConfig register */
258 IFG = (1 << 25) | (1 << 24), /* standard IEEE interframe gap */
259 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
261 /* Early Tx Threshold register */
262 TxThreshMask = 0x3f, /* Mask bits 5-0 */
263 TxThreshMax = 2048, /* Max early Tx threshold */
265 /* Config1 register */
266 DriverLoaded = (1 << 5), /* Software marker, driver is loaded */
267 LWACT = (1 << 4), /* LWAKE active mode */
268 PMEnable = (1 << 0), /* Enable various PM features of chip */
270 /* Config3 register */
271 PARMEnable = (1 << 6), /* Enable auto-loading of PHY parms */
272 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
273 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
275 /* Config4 register */
276 LWPTN = (1 << 1), /* LWAKE Pattern */
277 LWPME = (1 << 4), /* LANWAKE vs PMEB */
279 /* Config5 register */
280 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
281 MWF = (1 << 5), /* Accept Multicast wakeup frame */
282 UWF = (1 << 4), /* Accept Unicast wakeup frame */
283 LANWake = (1 << 1), /* Enable LANWake signal */
284 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
286 cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
287 cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
288 cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
291 static const unsigned int cp_rx_config =
292 (RX_FIFO_THRESH << RxCfgFIFOShift) |
293 (RX_DMA_BURST << RxCfgDMAShift);
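/*
 * With the defaults above this evaluates to (5 << 13) | (4 << 8) = 0xa400:
 * a 512-byte Rx FIFO threshold and a 256-byte max DMA burst (both use the
 * log2(bytes) - 4 encoding described at RX_FIFO_THRESH).
 */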
301 struct cp_dma_stats {
317 struct cp_extra_stats {
318 unsigned long rx_frags;
323 struct net_device *dev;
327 struct napi_struct napi;
329 struct pci_dev *pdev;
333 struct cp_extra_stats cp_stats;
335 unsigned rx_head ____cacheline_aligned;
337 struct cp_desc *rx_ring;
338 struct sk_buff *rx_skb[CP_RX_RING_SIZE];
340 unsigned tx_head ____cacheline_aligned;
342 struct cp_desc *tx_ring;
343 struct sk_buff *tx_skb[CP_TX_RING_SIZE];
346 unsigned wol_enabled : 1; /* Is Wake-on-LAN enabled? */
350 struct mii_if_info mii_if;
353 #define cpr8(reg) readb(cp->regs + (reg))
354 #define cpr16(reg) readw(cp->regs + (reg))
355 #define cpr32(reg) readl(cp->regs + (reg))
356 #define cpw8(reg,val) writeb((val), cp->regs + (reg))
357 #define cpw16(reg,val) writew((val), cp->regs + (reg))
358 #define cpw32(reg,val) writel((val), cp->regs + (reg))
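/*
 * The "_f" variants below flush posted PCI writes: a plain writeX() may
 * still sit in a host-bridge buffer when the next instruction runs, so
 * reading the register back stalls the CPU until the write has actually
 * reached the chip.  Minimal usage sketch:
 *
 *	cpw16_f(IntrMask, 0);	- guaranteed masked before we continue
 */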
359 #define cpw8_f(reg,val) do { \
360 writeb((val), cp->regs + (reg)); \
361 readb(cp->regs + (reg)); \
363 #define cpw16_f(reg,val) do { \
364 writew((val), cp->regs + (reg)); \
365 readw(cp->regs + (reg)); \
367 #define cpw32_f(reg,val) do { \
368 writel((val), cp->regs + (reg)); \
369 readl(cp->regs + (reg)); \
373 static void __cp_set_rx_mode (struct net_device *dev);
374 static void cp_tx (struct cp_private *cp);
375 static void cp_clean_rings (struct cp_private *cp);
376 #ifdef CONFIG_NET_POLL_CONTROLLER
377 static void cp_poll_controller(struct net_device *dev);
379 static int cp_get_eeprom_len(struct net_device *dev);
380 static int cp_get_eeprom(struct net_device *dev,
381 struct ethtool_eeprom *eeprom, u8 *data);
382 static int cp_set_eeprom(struct net_device *dev,
383 struct ethtool_eeprom *eeprom, u8 *data);
386 const char str[ETH_GSTRING_LEN];
387 } ethtool_stats_keys[] = {
405 static inline void cp_set_rxbufsize (struct cp_private *cp)
407 unsigned int mtu = cp->dev->mtu;
409 if (mtu > ETH_DATA_LEN)
410 /* MTU + ethernet header + FCS + optional VLAN tag */
411 cp->rx_buf_sz = mtu + ETH_HLEN + 8;
413 cp->rx_buf_sz = PKT_BUF_SZ;
416 static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
417 struct cp_desc *desc)
419 u32 opts2 = le32_to_cpu(desc->opts2);
421 skb->protocol = eth_type_trans (skb, cp->dev);
423 cp->dev->stats.rx_packets++;
424 cp->dev->stats.rx_bytes += skb->len;
426 if (opts2 & RxVlanTagged)
427 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
429 napi_gro_receive(&cp->napi, skb);
432 static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
435 netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n",
436 rx_tail, status, len);
437 cp->dev->stats.rx_errors++;
438 if (status & RxErrFrame)
439 cp->dev->stats.rx_frame_errors++;
440 if (status & RxErrCRC)
441 cp->dev->stats.rx_crc_errors++;
442 if ((status & RxErrRunt) || (status & RxErrLong))
443 cp->dev->stats.rx_length_errors++;
444 if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
445 cp->dev->stats.rx_length_errors++;
446 if (status & RxErrFIFO)
447 cp->dev->stats.rx_fifo_errors++;
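/*
 * cp_rx_csum_ok() below decodes the PID1:PID0 protocol-id bits defined
 * earlier ((status >> 16) & 0x3): 0 == non-IP, 1 == UDP/IP, 2 == TCP/IP,
 * 3 == IP only.  The hardware checksum is trusted only when the frame is
 * TCP or UDP and the corresponding Fail bit is clear.
 */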
450 static inline unsigned int cp_rx_csum_ok (u32 status)
452 unsigned int protocol = (status >> 16) & 0x3;
454 if (((protocol == RxProtoTCP) && !(status & TCPFail)) ||
455 ((protocol == RxProtoUDP) && !(status & UDPFail)))
461 static int cp_rx_poll(struct napi_struct *napi, int budget)
463 struct cp_private *cp = container_of(napi, struct cp_private, napi);
464 struct net_device *dev = cp->dev;
465 unsigned int rx_tail = cp->rx_tail;
470 cpw16(IntrStatus, cp_rx_intr_mask);
472 while (rx < budget) {
474 dma_addr_t mapping, new_mapping;
475 struct sk_buff *skb, *new_skb;
476 struct cp_desc *desc;
477 const unsigned buflen = cp->rx_buf_sz;
479 skb = cp->rx_skb[rx_tail];
482 desc = &cp->rx_ring[rx_tail];
483 status = le32_to_cpu(desc->opts1);
484 if (status & DescOwn)
487 len = (status & 0x1fff) - 4;
488 mapping = le64_to_cpu(desc->addr);
490 if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
491 /* we don't support incoming fragmented frames.
492 * instead, we attempt to ensure that the
493 * pre-allocated RX skbs are properly sized such
494 * that RX fragments are never encountered
496 cp_rx_err_acct(cp, rx_tail, status, len);
497 dev->stats.rx_dropped++;
498 cp->cp_stats.rx_frags++;
502 if (status & (RxError | RxErrFIFO)) {
503 cp_rx_err_acct(cp, rx_tail, status, len);
507 netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n",
508 rx_tail, status, len);
510 new_skb = napi_alloc_skb(napi, buflen);
512 dev->stats.rx_dropped++;
516 new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
518 if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
519 dev->stats.rx_dropped++;
524 dma_unmap_single(&cp->pdev->dev, mapping,
525 buflen, PCI_DMA_FROMDEVICE);
527 /* Handle checksum offloading for incoming packets. */
528 if (cp_rx_csum_ok(status))
529 skb->ip_summed = CHECKSUM_UNNECESSARY;
531 skb_checksum_none_assert(skb);
535 cp->rx_skb[rx_tail] = new_skb;
537 cp_rx_skb(cp, skb, desc);
539 mapping = new_mapping;
542 cp->rx_ring[rx_tail].opts2 = 0;
543 cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
544 if (rx_tail == (CP_RX_RING_SIZE - 1))
545 desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
548 desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
549 rx_tail = NEXT_RX(rx_tail);
552 cp->rx_tail = rx_tail;
554 /* if we did not reach work limit, then we're done with
555 * this round of polling
560 if (cpr16(IntrStatus) & cp_rx_intr_mask)
563 napi_gro_flush(napi, false);
564 spin_lock_irqsave(&cp->lock, flags);
565 __napi_complete(napi);
566 cpw16_f(IntrMask, cp_intr_mask);
567 spin_unlock_irqrestore(&cp->lock, flags);
573 static irqreturn_t cp_interrupt (int irq, void *dev_instance)
575 struct net_device *dev = dev_instance;
576 struct cp_private *cp;
580 if (unlikely(dev == NULL))
582 cp = netdev_priv(dev);
584 spin_lock(&cp->lock);
586 status = cpr16(IntrStatus);
587 if (!status || (status == 0xFFFF))
592 netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n",
593 status, cpr8(Cmd), cpr16(CpCmd));
595 cpw16(IntrStatus, status & ~cp_rx_intr_mask);
597 /* close possible races with dev_close */
598 if (unlikely(!netif_running(dev))) {
603 if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
604 if (napi_schedule_prep(&cp->napi)) {
605 cpw16_f(IntrMask, cp_norx_intr_mask);
606 __napi_schedule(&cp->napi);
609 if (status & (TxOK | TxErr | TxEmpty | SWInt))
611 if (status & LinkChg)
612 mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
615 if (status & PciErr) {
618 pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
619 pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
620 netdev_err(dev, "PCI bus error, status=%04x, PCI status=%04x\n",
623 /* TODO: reset hardware */
627 spin_unlock(&cp->lock);
629 return IRQ_RETVAL(handled);
632 #ifdef CONFIG_NET_POLL_CONTROLLER
634 * Polling receive - used by netconsole and other diagnostic tools
635 * to allow network i/o with interrupts disabled.
637 static void cp_poll_controller(struct net_device *dev)
639 struct cp_private *cp = netdev_priv(dev);
640 const int irq = cp->pdev->irq;
643 cp_interrupt(irq, dev);
648 static void cp_tx (struct cp_private *cp)
650 unsigned tx_head = cp->tx_head;
651 unsigned tx_tail = cp->tx_tail;
652 unsigned bytes_compl = 0, pkts_compl = 0;
654 while (tx_tail != tx_head) {
655 struct cp_desc *txd = cp->tx_ring + tx_tail;
660 status = le32_to_cpu(txd->opts1);
661 if (status & DescOwn)
664 skb = cp->tx_skb[tx_tail];
667 dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
668 le32_to_cpu(txd->opts1) & 0xffff,
671 if (status & LastFrag) {
672 if (status & (TxError | TxFIFOUnder)) {
673 netif_dbg(cp, tx_err, cp->dev,
674 "tx err, status 0x%x\n", status);
675 cp->dev->stats.tx_errors++;
677 cp->dev->stats.tx_window_errors++;
678 if (status & TxMaxCol)
679 cp->dev->stats.tx_aborted_errors++;
680 if (status & TxLinkFail)
681 cp->dev->stats.tx_carrier_errors++;
682 if (status & TxFIFOUnder)
683 cp->dev->stats.tx_fifo_errors++;
685 cp->dev->stats.collisions +=
686 ((status >> TxColCntShift) & TxColCntMask);
687 cp->dev->stats.tx_packets++;
688 cp->dev->stats.tx_bytes += skb->len;
689 netif_dbg(cp, tx_done, cp->dev,
690 "tx done, slot %d\n", tx_tail);
692 bytes_compl += skb->len;
694 dev_kfree_skb_irq(skb);
697 cp->tx_skb[tx_tail] = NULL;
699 tx_tail = NEXT_TX(tx_tail);
702 cp->tx_tail = tx_tail;
704 netdev_completed_queue(cp->dev, pkts_compl, bytes_compl);
705 if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
706 netif_wake_queue(cp->dev);
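/*
 * The queue is woken only once a worst-case packet (linear head plus
 * MAX_SKB_FRAGS fragments) is guaranteed to fit, mirroring the stop
 * condition in cp_start_xmit() and avoiding wake/stop thrash.
 */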
709 static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
711 return skb_vlan_tag_present(skb) ?
712 TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00;
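/*
 * The swab16() above is deliberate: the chip interprets the VLAN tag
 * field of opts2 in big-endian (network) byte order, unlike the rest of
 * the little-endian descriptor, so the tag is byte-swapped on transmit
 * and swapped back in cp_rx_skb() on receive.
 */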
715 static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb,
716 int first, int entry_last)
720 skb_frag_t *this_frag;
721 for (frag = 0; frag+first < entry_last; frag++) {
723 cp->tx_skb[index] = NULL;
724 txd = &cp->tx_ring[index];
725 this_frag = &skb_shinfo(skb)->frags[frag];
726 dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
727 skb_frag_size(this_frag), PCI_DMA_TODEVICE);
731 static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
732 struct net_device *dev)
734 struct cp_private *cp = netdev_priv(dev);
737 unsigned long intr_flags;
741 spin_lock_irqsave(&cp->lock, intr_flags);
743 /* This is a hard error, log it. */
744 if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
745 netif_stop_queue(dev);
746 spin_unlock_irqrestore(&cp->lock, intr_flags);
747 netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
748 return NETDEV_TX_BUSY;
752 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
753 mss = skb_shinfo(skb)->gso_size;
755 opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));
757 if (skb_shinfo(skb)->nr_frags == 0) {
758 struct cp_desc *txd = &cp->tx_ring[entry];
763 mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
764 if (dma_mapping_error(&cp->pdev->dev, mapping))
768 txd->addr = cpu_to_le64(mapping);
771 flags = eor | len | DescOwn | FirstFrag | LastFrag;
774 flags |= LargeSend | ((mss & MSSMask) << MSSShift);
775 else if (skb->ip_summed == CHECKSUM_PARTIAL) {
776 const struct iphdr *ip = ip_hdr(skb);
777 if (ip->protocol == IPPROTO_TCP)
778 flags |= IPCS | TCPCS;
779 else if (ip->protocol == IPPROTO_UDP)
780 flags |= IPCS | UDPCS;
782 WARN_ON(1); /* we need a WARN() */
785 txd->opts1 = cpu_to_le32(flags);
788 cp->tx_skb[entry] = skb;
789 entry = NEXT_TX(entry);
792 u32 first_len, first_eor;
793 dma_addr_t first_mapping;
794 int frag, first_entry = entry;
795 const struct iphdr *ip = ip_hdr(skb);
797 /* We must give this initial chunk to the device last.
798 * Otherwise we could race with the device.
801 first_len = skb_headlen(skb);
802 first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
803 first_len, PCI_DMA_TODEVICE);
804 if (dma_mapping_error(&cp->pdev->dev, first_mapping))
807 cp->tx_skb[entry] = skb;
808 entry = NEXT_TX(entry);
810 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
811 const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
816 len = skb_frag_size(this_frag);
817 mapping = dma_map_single(&cp->pdev->dev,
818 skb_frag_address(this_frag),
819 len, PCI_DMA_TODEVICE);
820 if (dma_mapping_error(&cp->pdev->dev, mapping)) {
821 unwind_tx_frag_mapping(cp, skb, first_entry, entry);
825 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
827 ctrl = eor | len | DescOwn;
831 ((mss & MSSMask) << MSSShift);
832 else if (skb->ip_summed == CHECKSUM_PARTIAL) {
833 if (ip->protocol == IPPROTO_TCP)
834 ctrl |= IPCS | TCPCS;
835 else if (ip->protocol == IPPROTO_UDP)
836 ctrl |= IPCS | UDPCS;
841 if (frag == skb_shinfo(skb)->nr_frags - 1)
844 txd = &cp->tx_ring[entry];
846 txd->addr = cpu_to_le64(mapping);
849 txd->opts1 = cpu_to_le32(ctrl);
852 cp->tx_skb[entry] = skb;
853 entry = NEXT_TX(entry);
856 txd = &cp->tx_ring[first_entry];
858 txd->addr = cpu_to_le64(first_mapping);
861 if (skb->ip_summed == CHECKSUM_PARTIAL) {
862 if (ip->protocol == IPPROTO_TCP)
863 txd->opts1 = cpu_to_le32(first_eor | first_len |
864 FirstFrag | DescOwn |
866 else if (ip->protocol == IPPROTO_UDP)
867 txd->opts1 = cpu_to_le32(first_eor | first_len |
868 FirstFrag | DescOwn |
873 txd->opts1 = cpu_to_le32(first_eor | first_len |
874 FirstFrag | DescOwn);
879 netdev_sent_queue(dev, skb->len);
880 netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
882 if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
883 netif_stop_queue(dev);
886 spin_unlock_irqrestore(&cp->lock, intr_flags);
888 cpw8(TxPoll, NormalTxPoll);
892 dev_kfree_skb_any(skb);
893 cp->dev->stats.tx_dropped++;
897 /* Set or clear the multicast filter for this adaptor.
898 This routine is not state sensitive and need not be SMP locked. */
900 static void __cp_set_rx_mode (struct net_device *dev)
902 struct cp_private *cp = netdev_priv(dev);
903 u32 mc_filter[2]; /* Multicast hash filter */
906 /* Note: do not reorder, GCC is clever about common statements. */
907 if (dev->flags & IFF_PROMISC) {
908 /* Unconditionally log net taps. */
910 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
912 mc_filter[1] = mc_filter[0] = 0xffffffff;
913 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
914 (dev->flags & IFF_ALLMULTI)) {
915 /* Too many to filter perfectly -- accept all multicasts. */
916 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
917 mc_filter[1] = mc_filter[0] = 0xffffffff;
919 struct netdev_hw_addr *ha;
920 rx_mode = AcceptBroadcast | AcceptMyPhys;
921 mc_filter[1] = mc_filter[0] = 0;
922 netdev_for_each_mc_addr(ha, dev) {
923 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
925 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
926 rx_mode |= AcceptMulticast;
930 /* We can safely update without stopping the chip. */
931 cp->rx_config = cp_rx_config | rx_mode;
932 cpw32_f(RxConfig, cp->rx_config);
934 cpw32_f (MAR0 + 0, mc_filter[0]);
935 cpw32_f (MAR0 + 4, mc_filter[1]);
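/*
 * Hypothetical helper, shown only to illustrate the imperfect hash used
 * above: the top six bits of the big-endian CRC-32 of the destination
 * MAC select one of the 64 filter bits spread across the two 32-bit
 * MAR0/MAR4 words.
 */
static inline void cp_mc_hash_demo(const u8 *addr, u32 *mc_filter)
{
	int bit_nr = ether_crc(ETH_ALEN, addr) >> 26;	/* 0..63 */

	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);	/* word, then bit */
}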
938 static void cp_set_rx_mode (struct net_device *dev)
941 struct cp_private *cp = netdev_priv(dev);
943 spin_lock_irqsave (&cp->lock, flags);
944 __cp_set_rx_mode(dev);
945 spin_unlock_irqrestore (&cp->lock, flags);
948 static void __cp_get_stats(struct cp_private *cp)
950 /* only lower 24 bits valid; write any value to clear */
951 cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
955 static struct net_device_stats *cp_get_stats(struct net_device *dev)
957 struct cp_private *cp = netdev_priv(dev);
960 /* The chip only needs to report frames it silently dropped. */
961 spin_lock_irqsave(&cp->lock, flags);
962 if (netif_running(dev) && netif_device_present(dev))
964 spin_unlock_irqrestore(&cp->lock, flags);
969 static void cp_stop_hw (struct cp_private *cp)
971 cpw16(IntrStatus, ~(cpr16(IntrStatus)));
972 cpw16_f(IntrMask, 0);
975 cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));
978 cp->tx_head = cp->tx_tail = 0;
980 netdev_reset_queue(cp->dev);
983 static void cp_reset_hw (struct cp_private *cp)
985 unsigned work = 1000;
990 if (!(cpr8(Cmd) & CmdReset))
993 schedule_timeout_uninterruptible(10);
996 netdev_err(cp->dev, "hardware reset timeout\n");
999 static inline void cp_start_hw (struct cp_private *cp)
1001 dma_addr_t ring_dma;
1003 cpw16(CpCmd, cp->cpcmd);
1006 * These (at least TxRingAddr) need to be configured after the
1007 * corresponding bits in CpCmd are enabled. Datasheet v1.6 §6.33
1008 * (C+ Command Register) recommends that these and more be configured
1009 * *after* the [RT]xEnable bits in CpCmd are set. And on some hardware
1010 * it's been observed that the TxRingAddr is actually reset to garbage
1011 * when C+ mode Tx is enabled in CpCmd.
1013 cpw32_f(HiTxRingAddr, 0);
1014 cpw32_f(HiTxRingAddr + 4, 0);
1016 ring_dma = cp->ring_dma;
1017 cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
1018 cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
1020 ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
1021 cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
1022 cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
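/*
 * The "(ring_dma >> 16) >> 16" idiom is deliberate: dma_addr_t may be a
 * 32-bit type, and a single ">> 32" on it would be undefined behaviour.
 * Split in two, it yields the high dword on 64-bit configurations and 0
 * on 32-bit ones.
 */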
1025 * Strictly speaking, the datasheet says this should be enabled
1026 * *before* setting the descriptor addresses. But what, then, would
1027 * prevent it from doing DMA to random unconfigured addresses?
1028 * This variant appears to work fine.
1030 cpw8(Cmd, RxOn | TxOn);
1032 netdev_reset_queue(cp->dev);
1035 static void cp_enable_irq(struct cp_private *cp)
1037 cpw16_f(IntrMask, cp_intr_mask);
1040 static void cp_init_hw (struct cp_private *cp)
1042 struct net_device *dev = cp->dev;
1046 cpw8_f (Cfg9346, Cfg9346_Unlock);
1048 /* Restore our idea of the MAC address. */
1049 cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
1050 cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
1053 cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
1055 __cp_set_rx_mode(dev);
1056 cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));
1058 cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
1059 /* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
1060 cpw8(Config3, PARMEnable);
1061 cp->wol_enabled = 0;
1063 cpw8(Config5, cpr8(Config5) & PMEStatus);
1065 cpw16(MultiIntr, 0);
1067 cpw8_f(Cfg9346, Cfg9346_Lock);
1070 static int cp_refill_rx(struct cp_private *cp)
1072 struct net_device *dev = cp->dev;
1075 for (i = 0; i < CP_RX_RING_SIZE; i++) {
1076 struct sk_buff *skb;
1079 skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);
1083 mapping = dma_map_single(&cp->pdev->dev, skb->data,
1084 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1085 if (dma_mapping_error(&cp->pdev->dev, mapping)) {
1089 cp->rx_skb[i] = skb;
1091 cp->rx_ring[i].opts2 = 0;
1092 cp->rx_ring[i].addr = cpu_to_le64(mapping);
1093 if (i == (CP_RX_RING_SIZE - 1))
1094 cp->rx_ring[i].opts1 =
1095 cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
1097 cp->rx_ring[i].opts1 =
1098 cpu_to_le32(DescOwn | cp->rx_buf_sz);
1108 static void cp_init_rings_index (struct cp_private *cp)
1111 cp->tx_head = cp->tx_tail = 0;
1114 static int cp_init_rings (struct cp_private *cp)
1116 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1117 cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
1119 cp_init_rings_index(cp);
1121 return cp_refill_rx (cp);
1124 static int cp_alloc_rings (struct cp_private *cp)
1126 struct device *d = &cp->pdev->dev;
1130 mem = dma_alloc_coherent(d, CP_RING_BYTES, &cp->ring_dma, GFP_KERNEL);
1135 cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
1137 rc = cp_init_rings(cp);
1139 dma_free_coherent(d, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
1144 static void cp_clean_rings (struct cp_private *cp)
1146 struct cp_desc *desc;
1149 for (i = 0; i < CP_RX_RING_SIZE; i++) {
1150 if (cp->rx_skb[i]) {
1151 desc = cp->rx_ring + i;
1152 dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
1153 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1154 dev_kfree_skb_any(cp->rx_skb[i]);
1158 for (i = 0; i < CP_TX_RING_SIZE; i++) {
1159 if (cp->tx_skb[i]) {
1160 struct sk_buff *skb = cp->tx_skb[i];
1162 desc = cp->tx_ring + i;
1163 dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
1164 le32_to_cpu(desc->opts1) & 0xffff,
1166 if (le32_to_cpu(desc->opts1) & LastFrag)
1167 dev_kfree_skb_any(skb);
1168 cp->dev->stats.tx_dropped++;
1171 netdev_reset_queue(cp->dev);
1173 memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
1174 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1176 memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
1177 memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
1180 static void cp_free_rings (struct cp_private *cp)
1183 dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring,
1189 static int cp_open (struct net_device *dev)
1191 struct cp_private *cp = netdev_priv(dev);
1192 const int irq = cp->pdev->irq;
1195 netif_dbg(cp, ifup, dev, "enabling interface\n");
1197 rc = cp_alloc_rings(cp);
1201 napi_enable(&cp->napi);
1205 rc = request_irq(irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
1211 netif_carrier_off(dev);
1212 mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
1213 netif_start_queue(dev);
1218 napi_disable(&cp->napi);
1224 static int cp_close (struct net_device *dev)
1226 struct cp_private *cp = netdev_priv(dev);
1227 unsigned long flags;
1229 napi_disable(&cp->napi);
1231 netif_dbg(cp, ifdown, dev, "disabling interface\n");
1233 spin_lock_irqsave(&cp->lock, flags);
1235 netif_stop_queue(dev);
1236 netif_carrier_off(dev);
1240 spin_unlock_irqrestore(&cp->lock, flags);
1242 free_irq(cp->pdev->irq, dev);
1248 static void cp_tx_timeout(struct net_device *dev)
1250 struct cp_private *cp = netdev_priv(dev);
1251 unsigned long flags;
1254 netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
1255 cpr8(Cmd), cpr16(CpCmd),
1256 cpr16(IntrStatus), cpr16(IntrMask));
1258 spin_lock_irqsave(&cp->lock, flags);
1262 rc = cp_init_rings(cp);
1264 __cp_set_rx_mode(dev);
1267 netif_wake_queue(dev);
1269 spin_unlock_irqrestore(&cp->lock, flags);
1272 static int cp_change_mtu(struct net_device *dev, int new_mtu)
1274 struct cp_private *cp = netdev_priv(dev);
1276 /* check for invalid MTU, according to hardware limits */
1277 if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
1280 /* if network interface not up, no need for complexity */
1281 if (!netif_running(dev)) {
1283 cp_set_rxbufsize(cp); /* set new rx buf size */
1287 /* network IS up, close it, reset MTU, and come up again. */
1290 cp_set_rxbufsize(cp);
1291 return cp_open(dev);
1294 static const char mii_2_8139_map[8] = {
1305 static int mdio_read(struct net_device *dev, int phy_id, int location)
1307 struct cp_private *cp = netdev_priv(dev);
1309 return location < 8 && mii_2_8139_map[location] ?
1310 readw(cp->regs + mii_2_8139_map[location]) : 0;
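/*
 * The 8139C+ internal PHY has no true MII management interface; MII
 * registers 0..7 are shadowed at the chip registers listed in
 * mii_2_8139_map above, unmapped locations simply read as zero, and
 * writes to BMCR must go through the Cfg9346 unlock sequence (see
 * mdio_write() below).
 */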
1314 static void mdio_write(struct net_device *dev, int phy_id, int location,
1317 struct cp_private *cp = netdev_priv(dev);
1319 if (location == 0) {
1320 cpw8(Cfg9346, Cfg9346_Unlock);
1321 cpw16(BasicModeCtrl, value);
1322 cpw8(Cfg9346, Cfg9346_Lock);
1323 } else if (location < 8 && mii_2_8139_map[location])
1324 cpw16(mii_2_8139_map[location], value);
1327 /* Set the ethtool Wake-on-LAN settings */
1328 static int netdev_set_wol (struct cp_private *cp,
1329 const struct ethtool_wolinfo *wol)
1333 options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
1334 /* If WOL is being disabled, no need for complexity */
1336 if (wol->wolopts & WAKE_PHY) options |= LinkUp;
1337 if (wol->wolopts & WAKE_MAGIC) options |= MagicPacket;
1340 cpw8 (Cfg9346, Cfg9346_Unlock);
1341 cpw8 (Config3, options);
1342 cpw8 (Cfg9346, Cfg9346_Lock);
1344 options = 0; /* Paranoia setting */
1345 options = cpr8 (Config5) & ~(UWF | MWF | BWF);
1346 /* If WOL is being disabled, no need for complexity */
1348 if (wol->wolopts & WAKE_UCAST) options |= UWF;
1349 if (wol->wolopts & WAKE_BCAST) options |= BWF;
1350 if (wol->wolopts & WAKE_MCAST) options |= MWF;
1353 cpw8 (Config5, options);
1355 cp->wol_enabled = (wol->wolopts) ? 1 : 0;
1360 /* Get the ethtool Wake-on-LAN settings */
1361 static void netdev_get_wol (struct cp_private *cp,
1362 struct ethtool_wolinfo *wol)
1366 wol->wolopts = 0; /* Start from scratch */
1367 wol->supported = WAKE_PHY | WAKE_BCAST | WAKE_MAGIC |
1368 WAKE_MCAST | WAKE_UCAST;
1369 /* We don't need to go on if WOL is disabled */
1370 if (!cp->wol_enabled) return;
1372 options = cpr8 (Config3);
1373 if (options & LinkUp) wol->wolopts |= WAKE_PHY;
1374 if (options & MagicPacket) wol->wolopts |= WAKE_MAGIC;
1376 options = 0; /* Paranoia setting */
1377 options = cpr8 (Config5);
1378 if (options & UWF) wol->wolopts |= WAKE_UCAST;
1379 if (options & BWF) wol->wolopts |= WAKE_BCAST;
1380 if (options & MWF) wol->wolopts |= WAKE_MCAST;
1383 static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1385 struct cp_private *cp = netdev_priv(dev);
1387 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1388 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1389 strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
1392 static void cp_get_ringparam(struct net_device *dev,
1393 struct ethtool_ringparam *ring)
1395 ring->rx_max_pending = CP_RX_RING_SIZE;
1396 ring->tx_max_pending = CP_TX_RING_SIZE;
1397 ring->rx_pending = CP_RX_RING_SIZE;
1398 ring->tx_pending = CP_TX_RING_SIZE;
1401 static int cp_get_regs_len(struct net_device *dev)
1403 return CP_REGS_SIZE;
1406 static int cp_get_sset_count (struct net_device *dev, int sset)
1410 return CP_NUM_STATS;
1416 static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1418 struct cp_private *cp = netdev_priv(dev);
1420 unsigned long flags;
1422 spin_lock_irqsave(&cp->lock, flags);
1423 rc = mii_ethtool_gset(&cp->mii_if, cmd);
1424 spin_unlock_irqrestore(&cp->lock, flags);
1429 static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1431 struct cp_private *cp = netdev_priv(dev);
1433 unsigned long flags;
1435 spin_lock_irqsave(&cp->lock, flags);
1436 rc = mii_ethtool_sset(&cp->mii_if, cmd);
1437 spin_unlock_irqrestore(&cp->lock, flags);
1442 static int cp_nway_reset(struct net_device *dev)
1444 struct cp_private *cp = netdev_priv(dev);
1445 return mii_nway_restart(&cp->mii_if);
1448 static u32 cp_get_msglevel(struct net_device *dev)
1450 struct cp_private *cp = netdev_priv(dev);
1451 return cp->msg_enable;
1454 static void cp_set_msglevel(struct net_device *dev, u32 value)
1456 struct cp_private *cp = netdev_priv(dev);
1457 cp->msg_enable = value;
1460 static int cp_set_features(struct net_device *dev, netdev_features_t features)
1462 struct cp_private *cp = netdev_priv(dev);
1463 unsigned long flags;
1465 if (!((dev->features ^ features) & NETIF_F_RXCSUM))
1468 spin_lock_irqsave(&cp->lock, flags);
1470 if (features & NETIF_F_RXCSUM)
1471 cp->cpcmd |= RxChkSum;
1473 cp->cpcmd &= ~RxChkSum;
1475 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1476 cp->cpcmd |= RxVlanOn;
1478 cp->cpcmd &= ~RxVlanOn;
1480 cpw16_f(CpCmd, cp->cpcmd);
1481 spin_unlock_irqrestore(&cp->lock, flags);
1486 static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1489 struct cp_private *cp = netdev_priv(dev);
1490 unsigned long flags;
1492 if (regs->len < CP_REGS_SIZE)
1493 return /* -EINVAL */;
1495 regs->version = CP_REGS_VER;
1497 spin_lock_irqsave(&cp->lock, flags);
1498 memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
1499 spin_unlock_irqrestore(&cp->lock, flags);
1502 static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1504 struct cp_private *cp = netdev_priv(dev);
1505 unsigned long flags;
1507 spin_lock_irqsave (&cp->lock, flags);
1508 netdev_get_wol (cp, wol);
1509 spin_unlock_irqrestore (&cp->lock, flags);
1512 static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1514 struct cp_private *cp = netdev_priv(dev);
1515 unsigned long flags;
1518 spin_lock_irqsave (&cp->lock, flags);
1519 rc = netdev_set_wol (cp, wol);
1520 spin_unlock_irqrestore (&cp->lock, flags);
1525 static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
1527 switch (stringset) {
1529 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
1537 static void cp_get_ethtool_stats (struct net_device *dev,
1538 struct ethtool_stats *estats, u64 *tmp_stats)
1540 struct cp_private *cp = netdev_priv(dev);
1541 struct cp_dma_stats *nic_stats;
1545 nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats),
1550 /* begin NIC statistics dump */
1551 cpw32(StatsAddr + 4, (u64)dma >> 32);
1552 cpw32(StatsAddr, ((u64)dma & DMA_BIT_MASK(32)) | DumpStats);
1555 for (i = 0; i < 1000; i++) {
1556 if ((cpr32(StatsAddr) & DumpStats) == 0)
1560 cpw32(StatsAddr, 0);
1561 cpw32(StatsAddr + 4, 0);
1565 tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
1566 tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
1567 tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
1568 tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
1569 tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
1570 tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
1571 tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
1572 tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
1573 tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
1574 tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
1575 tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
1576 tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
1577 tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
1578 tmp_stats[i++] = cp->cp_stats.rx_frags;
1579 BUG_ON(i != CP_NUM_STATS);
1581 dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma);
1584 static const struct ethtool_ops cp_ethtool_ops = {
1585 .get_drvinfo = cp_get_drvinfo,
1586 .get_regs_len = cp_get_regs_len,
1587 .get_sset_count = cp_get_sset_count,
1588 .get_settings = cp_get_settings,
1589 .set_settings = cp_set_settings,
1590 .nway_reset = cp_nway_reset,
1591 .get_link = ethtool_op_get_link,
1592 .get_msglevel = cp_get_msglevel,
1593 .set_msglevel = cp_set_msglevel,
1594 .get_regs = cp_get_regs,
1595 .get_wol = cp_get_wol,
1596 .set_wol = cp_set_wol,
1597 .get_strings = cp_get_strings,
1598 .get_ethtool_stats = cp_get_ethtool_stats,
1599 .get_eeprom_len = cp_get_eeprom_len,
1600 .get_eeprom = cp_get_eeprom,
1601 .set_eeprom = cp_set_eeprom,
1602 .get_ringparam = cp_get_ringparam,
1605 static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1607 struct cp_private *cp = netdev_priv(dev);
1609 unsigned long flags;
1611 if (!netif_running(dev))
1614 spin_lock_irqsave(&cp->lock, flags);
1615 rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
1616 spin_unlock_irqrestore(&cp->lock, flags);
1620 static int cp_set_mac_address(struct net_device *dev, void *p)
1622 struct cp_private *cp = netdev_priv(dev);
1623 struct sockaddr *addr = p;
1625 if (!is_valid_ether_addr(addr->sa_data))
1626 return -EADDRNOTAVAIL;
1628 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1630 spin_lock_irq(&cp->lock);
1632 cpw8_f(Cfg9346, Cfg9346_Unlock);
1633 cpw32_f(MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
1634 cpw32_f(MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
1635 cpw8_f(Cfg9346, Cfg9346_Lock);
1637 spin_unlock_irq(&cp->lock);
1642 /* Serial EEPROM section. */
1644 /* EEPROM_Ctrl bits. */
1645 #define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
1646 #define EE_CS 0x08 /* EEPROM chip select. */
1647 #define EE_DATA_WRITE 0x02 /* EEPROM chip data in. */
1648 #define EE_WRITE_0 0x00
1649 #define EE_WRITE_1 0x02
1650 #define EE_DATA_READ 0x01 /* EEPROM chip data out. */
1651 #define EE_ENB (0x80 | EE_CS)
1653 /* Delay between EEPROM clock transitions.
1654 No extra delay is needed with 33 MHz PCI, but 66 MHz may change this.
1657 #define eeprom_delay() readb(ee_addr)
1659 /* The EEPROM commands include the always-set leading bit. */
1660 #define EE_EXTEND_CMD (4)
1661 #define EE_WRITE_CMD (5)
1662 #define EE_READ_CMD (6)
1663 #define EE_ERASE_CMD (7)
1665 #define EE_EWDS_ADDR (0)
1666 #define EE_WRAL_ADDR (1)
1667 #define EE_ERAL_ADDR (2)
1668 #define EE_EWEN_ADDR (3)
1670 #define CP_EEPROM_MAGIC PCI_DEVICE_ID_REALTEK_8139
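/*
 * Worked example for the 93C46-style command encoding used below,
 * assuming a 6-bit address width: reading word 7 shifts out the 9-bit
 * value (EE_READ_CMD << 6) | 7 == 0x187 MSB-first, i.e. start bit 1,
 * opcode 10, then the address 000111.  read_eeprom() builds exactly this
 * with "location | (EE_READ_CMD << addr_len)" and a length of
 * 3 + addr_len bits.
 */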
1672 static void eeprom_cmd_start(void __iomem *ee_addr)
1674 writeb (EE_ENB & ~EE_CS, ee_addr);
1675 writeb (EE_ENB, ee_addr);
1679 static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
1683 /* Shift the command bits out. */
1684 for (i = cmd_len - 1; i >= 0; i--) {
1685 int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1686 writeb (EE_ENB | dataval, ee_addr);
1688 writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
1691 writeb (EE_ENB, ee_addr);
1695 static void eeprom_cmd_end(void __iomem *ee_addr)
1701 static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
1704 int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));
1706 eeprom_cmd_start(ee_addr);
1707 eeprom_cmd(ee_addr, cmd, 3 + addr_len);
1708 eeprom_cmd_end(ee_addr);
1711 static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len)
1715 void __iomem *ee_addr = ioaddr + Cfg9346;
1716 int read_cmd = location | (EE_READ_CMD << addr_len);
1718 eeprom_cmd_start(ee_addr);
1719 eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);
1721 for (i = 16; i > 0; i--) {
1722 writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
1725 (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
1727 writeb (EE_ENB, ee_addr);
1731 eeprom_cmd_end(ee_addr);
1736 static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
1740 void __iomem *ee_addr = ioaddr + Cfg9346;
1741 int write_cmd = location | (EE_WRITE_CMD << addr_len);
1743 eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len);
1745 eeprom_cmd_start(ee_addr);
1746 eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
1747 eeprom_cmd(ee_addr, val, 16);
1748 eeprom_cmd_end(ee_addr);
1750 eeprom_cmd_start(ee_addr);
1751 for (i = 0; i < 20000; i++)
1752 if (readb(ee_addr) & EE_DATA_READ)
1754 eeprom_cmd_end(ee_addr);
1756 eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len);
1759 static int cp_get_eeprom_len(struct net_device *dev)
1761 struct cp_private *cp = netdev_priv(dev);
1764 spin_lock_irq(&cp->lock);
1765 size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
1766 spin_unlock_irq(&cp->lock);
1771 static int cp_get_eeprom(struct net_device *dev,
1772 struct ethtool_eeprom *eeprom, u8 *data)
1774 struct cp_private *cp = netdev_priv(dev);
1775 unsigned int addr_len;
1777 u32 offset = eeprom->offset >> 1;
1778 u32 len = eeprom->len;
1781 eeprom->magic = CP_EEPROM_MAGIC;
1783 spin_lock_irq(&cp->lock);
1785 addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1787 if (eeprom->offset & 1) {
1788 val = read_eeprom(cp->regs, offset, addr_len);
1789 data[i++] = (u8)(val >> 8);
1793 while (i < len - 1) {
1794 val = read_eeprom(cp->regs, offset, addr_len);
1795 data[i++] = (u8)val;
1796 data[i++] = (u8)(val >> 8);
1801 val = read_eeprom(cp->regs, offset, addr_len);
1805 spin_unlock_irq(&cp->lock);
1809 static int cp_set_eeprom(struct net_device *dev,
1810 struct ethtool_eeprom *eeprom, u8 *data)
1812 struct cp_private *cp = netdev_priv(dev);
1813 unsigned int addr_len;
1815 u32 offset = eeprom->offset >> 1;
1816 u32 len = eeprom->len;
1819 if (eeprom->magic != CP_EEPROM_MAGIC)
1822 spin_lock_irq(&cp->lock);
1824 addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1826 if (eeprom->offset & 1) {
1827 val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
1828 val |= (u16)data[i++] << 8;
1829 write_eeprom(cp->regs, offset, val, addr_len);
1833 while (i < len - 1) {
1834 val = (u16)data[i++];
1835 val |= (u16)data[i++] << 8;
1836 write_eeprom(cp->regs, offset, val, addr_len);
1841 val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
1842 val |= (u16)data[i];
1843 write_eeprom(cp->regs, offset, val, addr_len);
1846 spin_unlock_irq(&cp->lock);
1850 /* Put the board into D3cold state and wait for WakeUp signal */
1851 static void cp_set_d3_state (struct cp_private *cp)
1853 pci_enable_wake(cp->pdev, PCI_D0, 1); /* Enable PME# generation */
1854 pci_set_power_state (cp->pdev, PCI_D3hot);
1857 static const struct net_device_ops cp_netdev_ops = {
1858 .ndo_open = cp_open,
1859 .ndo_stop = cp_close,
1860 .ndo_validate_addr = eth_validate_addr,
1861 .ndo_set_mac_address = cp_set_mac_address,
1862 .ndo_set_rx_mode = cp_set_rx_mode,
1863 .ndo_get_stats = cp_get_stats,
1864 .ndo_do_ioctl = cp_ioctl,
1865 .ndo_start_xmit = cp_start_xmit,
1866 .ndo_tx_timeout = cp_tx_timeout,
1867 .ndo_set_features = cp_set_features,
1868 .ndo_change_mtu = cp_change_mtu,
1870 #ifdef CONFIG_NET_POLL_CONTROLLER
1871 .ndo_poll_controller = cp_poll_controller,
1875 static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1877 struct net_device *dev;
1878 struct cp_private *cp;
1881 resource_size_t pciaddr;
1882 unsigned int addr_len, i, pci_using_dac;
1884 pr_info_once("%s", version);
1886 if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
1887 pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
1888 dev_info(&pdev->dev,
1889 "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
1890 pdev->vendor, pdev->device, pdev->revision);
1894 dev = alloc_etherdev(sizeof(struct cp_private));
1897 SET_NETDEV_DEV(dev, &pdev->dev);
1899 cp = netdev_priv(dev);
1902 cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
1903 spin_lock_init (&cp->lock);
1904 cp->mii_if.dev = dev;
1905 cp->mii_if.mdio_read = mdio_read;
1906 cp->mii_if.mdio_write = mdio_write;
1907 cp->mii_if.phy_id = CP_INTERNAL_PHY;
1908 cp->mii_if.phy_id_mask = 0x1f;
1909 cp->mii_if.reg_num_mask = 0x1f;
1910 cp_set_rxbufsize(cp);
1912 rc = pci_enable_device(pdev);
1916 rc = pci_set_mwi(pdev);
1918 goto err_out_disable;
1920 rc = pci_request_regions(pdev, DRV_NAME);
1924 pciaddr = pci_resource_start(pdev, 1);
1927 dev_err(&pdev->dev, "no MMIO resource\n");
1930 if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
1932 dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
1933 (unsigned long long)pci_resource_len(pdev, 1));
1937 /* Configure DMA attributes. */
1938 if ((sizeof(dma_addr_t) > 4) &&
1939 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1940 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
1945 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1948 "No usable DMA configuration, aborting\n");
1951 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1954 "No usable consistent DMA configuration, aborting\n");
1959 cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
1960 PCIMulRW | RxChkSum | CpRxOn | CpTxOn;
1962 dev->features |= NETIF_F_RXCSUM;
1963 dev->hw_features |= NETIF_F_RXCSUM;
1965 regs = ioremap(pciaddr, CP_REGS_SIZE);
1968 dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
1969 (unsigned long long)pci_resource_len(pdev, 1),
1970 (unsigned long long)pciaddr);
1977 /* read MAC address from EEPROM */
1978 addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
1979 for (i = 0; i < 3; i++)
1980 ((__le16 *) (dev->dev_addr))[i] =
1981 cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
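/*
 * Each EEPROM word holds two MAC bytes least-significant byte first, so
 * words 7..9 supply dev_addr[0..5]; e.g. a word value of 0x1300 yields
 * dev_addr[0] = 0x00 and dev_addr[1] = 0x13.
 */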
1983 dev->netdev_ops = &cp_netdev_ops;
1984 netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
1985 dev->ethtool_ops = &cp_ethtool_ops;
1986 dev->watchdog_timeo = TX_TIMEOUT;
1988 dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
1991 dev->features |= NETIF_F_HIGHDMA;
1993 /* disabled by default until verified */
1994 dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1995 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
1996 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1999 rc = register_netdev(dev);
2003 netdev_info(dev, "RTL-8139C+ at 0x%p, %pM, IRQ %d\n",
2004 regs, dev->dev_addr, pdev->irq);
2006 pci_set_drvdata(pdev, dev);
2008 /* enable busmastering and memory-write-invalidate */
2009 pci_set_master(pdev);
2011 if (cp->wol_enabled)
2012 cp_set_d3_state (cp);
2019 pci_release_regions(pdev);
2021 pci_clear_mwi(pdev);
2023 pci_disable_device(pdev);
2029 static void cp_remove_one (struct pci_dev *pdev)
2031 struct net_device *dev = pci_get_drvdata(pdev);
2032 struct cp_private *cp = netdev_priv(dev);
2034 unregister_netdev(dev);
2036 if (cp->wol_enabled)
2037 pci_set_power_state (pdev, PCI_D0);
2038 pci_release_regions(pdev);
2039 pci_clear_mwi(pdev);
2040 pci_disable_device(pdev);
2045 static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
2047 struct net_device *dev = pci_get_drvdata(pdev);
2048 struct cp_private *cp = netdev_priv(dev);
2049 unsigned long flags;
2051 if (!netif_running(dev))
2054 netif_device_detach (dev);
2055 netif_stop_queue (dev);
2057 spin_lock_irqsave (&cp->lock, flags);
2059 /* Disable Rx and Tx */
2060 cpw16 (IntrMask, 0);
2061 cpw8 (Cmd, cpr8 (Cmd) & ~(RxOn | TxOn));
2063 spin_unlock_irqrestore (&cp->lock, flags);
2065 pci_save_state(pdev);
2066 pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
2067 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2072 static int cp_resume (struct pci_dev *pdev)
2074 struct net_device *dev = pci_get_drvdata (pdev);
2075 struct cp_private *cp = netdev_priv(dev);
2076 unsigned long flags;
2078 if (!netif_running(dev))
2081 netif_device_attach (dev);
2083 pci_set_power_state(pdev, PCI_D0);
2084 pci_restore_state(pdev);
2085 pci_enable_wake(pdev, PCI_D0, 0);
2087 /* FIXME: trouble may happen if the Rx ring buffer is depleted */
2088 cp_init_rings_index (cp);
2091 netif_start_queue (dev);
2093 spin_lock_irqsave (&cp->lock, flags);
2095 mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
2097 spin_unlock_irqrestore (&cp->lock, flags);
2101 #endif /* CONFIG_PM */
2103 static const struct pci_device_id cp_pci_tbl[] = {
2104 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), },
2105 { PCI_DEVICE(PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322), },
2108 MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
2110 static struct pci_driver cp_driver = {
2112 .id_table = cp_pci_tbl,
2113 .probe = cp_init_one,
2114 .remove = cp_remove_one,
2116 .resume = cp_resume,
2117 .suspend = cp_suspend,
2121 module_pci_driver(cp_driver);