/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
/*
	Written 1999-2000 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Support and updates available at
	http://www.scyld.com/network/sundance.html
	[link no longer provides useful info -jgarzik]
	Archives of the mailing list are still available at
	http://www.beowulf.org/pipermail/netdrivers/

*/
#define DRV_NAME	"sundance"
#define DRV_VERSION	"1.2"
#define DRV_RELDATE	"11-Sep-2006"
/* The user-configurable values.
   These may be modified when a driver module is loaded.*/
static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   Typical is a 64 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature.
   This chip can receive into offset buffers, so the Alpha does not
   need a copy-align. */
static int rx_copybreak;
static int flowctrl = 1;
/* media[] specifies the media type the NIC operates at.
		 autosense	Autosensing active media.
		 10mbps_hd	10Mbps half duplex.
		 10mbps_fd	10Mbps full duplex.
		 100mbps_hd	100Mbps half duplex.
		 100mbps_fd	100Mbps full duplex.
		 0		Autosensing active media.
		 1		10Mbps half duplex.
		 2		10Mbps full duplex.
		 3		100Mbps half duplex.
		 4		100Mbps full duplex.
*/
#define MAX_UNITS 8
static char *media[MAX_UNITS];
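/* A sketch of module usage (the values and their combination here are
 * illustrative only; the parameter names are defined above):
 *
 *	modprobe sundance debug=3 rx_copybreak=200 flowctrl=1 media=100mbps_fd
 */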
/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority, and more than 128 requires modifying the
   Tx error recovery.
   Large receive rings merely waste memory. */
#define TX_RING_SIZE	32
#define TX_QUEUE_LEN	(TX_RING_SIZE - 1) /* Limit ring entries actually used. */
#define RX_RING_SIZE	64
#define RX_BUDGET	32
#define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct netdev_desc)
#define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct netdev_desc)
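/* Why powers of two: with TX_RING_SIZE == 32, ring arithmetic such as
 *
 *	entry = np->cur_tx % TX_RING_SIZE;
 *
 * compiles to a bit mask, np->cur_tx & (TX_RING_SIZE - 1), so no divide
 * is generated for the hot-path index calculations.
 */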
/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (4*HZ)
#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer. */
/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/processor.h>		/* Processor type for cache alignment. */
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
/* These identify the driver base version and may not be removed. */
static const char version[] =
	KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
	" Written by Donald Becker\n";
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(media, charp, NULL, 0);
module_param(flowctrl, int, 0);
MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
/*
				Theory of Operation

I. Board Compatibility

This driver is designed for the Sundance Technologies "Alta" ST201 chip.

II. Board-specific settings

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
Some chips explicitly use only 2^N sized rings, while others use a
'next descriptor' pointer that the driver forms into rings.

IIIb/c. Transmit/Receive Structure

This driver uses a zero-copy receive and transmit scheme.
The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack.  Buffers consumed this way are replaced by newly allocated
skbuffs in a later phase of receives.

The RX_COPYBREAK value is chosen to trade-off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames.  New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets.  When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine.  Copying also preloads the cache, which is
most useful with small frames.
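
A sketch of the copy-break decision (simplified from rx_poll() below;
the names are the driver's own, error handling omitted):

	if (pkt_len < rx_copybreak &&
	    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL)
		... copy the frame into the small skb, recycle the ring buffer ...
	else
		... unmap the full-sized ring skb and pass it up unchanged ...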
A subtle aspect of the operation is that the IP header at offset 14 in an
ethernet frame isn't longword aligned for further processing.
Unaligned buffers are permitted by the Sundance hardware, so
frames are received into the skbuff at an offset of "+2", 16-byte aligning
the IP header.
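
In practice the "+2" offset looks like this (see init_ring() and
refill_rx() below for the real code):

	skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
	skb_reserve(skb, 2);	(the IP header now lands on a 16-byte boundary)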
IIId. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag.  It sets the tbusy flag whenever it's queuing a Tx packet.  If the next
queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
the 'lp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark.  Iff the 'lp->tx_full' flag is set, it
clears both the tx_full and tbusy flags.
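
The driver below implements the same producer/consumer scheme with the
cur_tx and dirty_tx indices and the modern queue API (a simplified sketch):

	send side:  if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
	                    netif_stop_queue(dev);
	irq side:   for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++)
	                    ...free the completed skb...
	            if (netif_queue_stopped(dev) &&
	                np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4)
	                    netif_wake_queue(dev);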
IV. Notes

IVb. References

The Sundance ST201 datasheet, preliminary version.
The Kendin KS8723 datasheet, preliminary version.
The ICplus IP100 datasheet, preliminary version.
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html

*/
/* Work-around for Kendin chip bugs. */
#ifndef CONFIG_SUNDANCE_MMIO
#define USE_IO_OPS 1
#endif

static DEFINE_PCI_DEVICE_TABLE(sundance_pci_tbl) = {
	{ 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
	{ 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
	{ 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
	{ 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
	{ 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
	{ 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
	{ 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
	{ }
};
MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
enum { netdev_io_size = 128 };

struct pci_id_info {
	const char *name;
};
static const struct pci_id_info pci_id_tbl[] = {
	{"D-Link DFE-550TX FAST Ethernet Adapter"},
	{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
	{"D-Link DFE-580TX 4 port Server Adapter"},
	{"D-Link DFE-530TXS FAST Ethernet Adapter"},
	{"D-Link DL10050-based FAST Ethernet Adapter"},
	{"Sundance Technology Alta"},
	{"IC Plus Corporation IP100A FAST Ethernet Adapter"},
	{ }	/* terminate list. */
};
/* This driver was written to use PCI memory space, however x86-oriented
   hardware often uses I/O space accesses. */

/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device.  The name can only partially document the semantics and make
   the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically.
*/
enum alta_offsets {
	DMACtrl = 0x00,
	TxListPtr = 0x04,
	TxDMABurstThresh = 0x08,
	TxDMAUrgentThresh = 0x09,
	TxDMAPollPeriod = 0x0a,
	RxDMAStatus = 0x0c,
	RxListPtr = 0x10,
	DebugCtrl0 = 0x1a,
	DebugCtrl1 = 0x1c,
	RxDMABurstThresh = 0x14,
	RxDMAUrgentThresh = 0x15,
	RxDMAPollPeriod = 0x16,
	LEDCtrl = 0x1a,
	ASICCtrl = 0x30,
	EEData = 0x34,
	EECtrl = 0x36,
	FlashAddr = 0x40,
	FlashData = 0x44,
	WakeEvent = 0x45,
	TxStatus = 0x46,
	TxFrameId = 0x47,
	DownCounter = 0x18,
	IntrClear = 0x4a,
	IntrEnable = 0x4c,
	IntrStatus = 0x4e,
	MACCtrl0 = 0x50,
	MACCtrl1 = 0x52,
	StationAddr = 0x54,
	MaxFrameSize = 0x5A,
	RxMode = 0x5c,
	MIICtrl = 0x5e,
	MulticastFilter0 = 0x60,
	MulticastFilter1 = 0x64,
	RxOctetsLow = 0x68,
	RxOctetsHigh = 0x6a,
	TxOctetsLow = 0x6c,
	TxOctetsHigh = 0x6e,
	TxFramesOK = 0x70,
	RxFramesOK = 0x72,
	StatsCarrierError = 0x74,
	StatsLateColl = 0x75,
	StatsMultiColl = 0x76,
	StatsOneColl = 0x77,
	StatsTxDefer = 0x78,
	RxMissed = 0x79,
	StatsTxXSDefer = 0x7a,
	StatsTxAbort = 0x7b,
	StatsBcastTx = 0x7c,
	StatsBcastRx = 0x7d,
	StatsMcastTx = 0x7e,
	StatsMcastRx = 0x7f,
	/* Aliased and bogus values! */
	RxStatus = 0x0c,
};
#define ASIC_HI_WORD(x)	((x) + 2)
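/* Example: the reset bits live in the high 16 bits of the 32-bit ASICCtrl
 * register, so the high word can be addressed on its own, as netdev_close()
 * below does:
 *
 *	ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl));
 */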
enum ASICCtrl_HiWord_bit {
	GlobalReset = 0x0001,
	RxReset = 0x0002,
	TxReset = 0x0004,
	DMAReset = 0x0008,
	FIFOReset = 0x0010,
	NetworkReset = 0x0020,
	HostReset = 0x0040,
	ResetBusy = 0x0400,
};

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
	IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
	IntrDrvRqst=0x0040,
	StatsMax=0x0080, LinkChange=0x0100,
	IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
};
/* Bits in the RxMode register. */
enum rx_mode_bits {
	AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
	AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
};
/* Bits in MACCtrl. */
enum mac_ctrl0_bits {
	EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
	EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
};
enum mac_ctrl1_bits {
	StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
	TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
	RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
	Paused=0x4000,
};
/* Bits in WakeEvent register. */
enum wake_event_bits {
	WakePktEnable = 0x01,
	MagicPktEnable = 0x02,
	LinkEventEnable = 0x04,
	WolEnable = 0x80,
};
/* The Rx and Tx buffer descriptors. */
/* Note that using only 32 bit fields simplifies conversion to big-endian
   architectures. */
struct netdev_desc {
	__le32 next_desc;
	__le32 status;
	struct desc_frag { __le32 addr, length; } frag[1];
};

/* Bits in netdev_desc.status */
enum desc_status_bits {
	DescOwn=0x8000,
	DescEndPacket=0x4000,
	DescEndRing=0x2000,
	LastFrag=0x80000000,
	DescIntrOnTx=0x8000,
	DescIntrOnDMADone=0x80000000,
	DisableAlign = 0x00000001,
};
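/* A sketch of how start_tx() below fills a one-fragment Tx descriptor
 * (simplified; the DMA-mapping error path is omitted):
 *
 *	txdesc->next_desc = 0;
 *	txdesc->status = cpu_to_le32((entry << 2) | DisableAlign);
 *	txdesc->frag[0].addr = cpu_to_le32(mapping);	(a dma_map_single() result)
 *	txdesc->frag[0].length = cpu_to_le32(skb->len | LastFrag);
 */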
#define PRIV_ALIGN	15	/* Required alignment mask */
/* Use  __attribute__((aligned (L1_CACHE_BYTES)))  to maintain alignment
   within the structure. */
#define MII_CNT		4
struct netdev_private {
	/* Descriptor rings first for alignment. */
	struct netdev_desc *rx_ring;
	struct netdev_desc *tx_ring;
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_ring_dma;
	dma_addr_t rx_ring_dma;
	struct timer_list timer;		/* Media monitoring timer. */
	/* ethtool extra stats */
	struct {
		u64 tx_multiple_collisions;
		u64 tx_single_collisions;
		u64 tx_late_collisions;
		u64 tx_deferred;
		u64 tx_deferred_excessive;
		u64 tx_aborted;
		u64 tx_bcasts;
		u64 rx_bcasts;
		u64 tx_mcasts;
		u64 rx_mcasts;
	} xstats;
	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t lock;
	int msg_enable;
	int chip_id;
	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;			/* Based on MTU+slack. */
	struct netdev_desc *last_tx;		/* Last Tx descriptor used. */
	unsigned int cur_tx, dirty_tx;
	/* These values keep track of the transceiver/media in use. */
	unsigned int flowctrl:1;
	unsigned int default_port:4;		/* Last dev->if_port value. */
	unsigned int an_enable:1;
	unsigned int speed;
	unsigned int wol_enabled:1;		/* Wake on LAN enabled */
	struct tasklet_struct rx_tasklet;
	struct tasklet_struct tx_tasklet;
	int budget;
	int cur_task;
	/* Multicast and receive mode. */
	spinlock_t mcastlock;			/* SMP lock multicast updates. */
	spinlock_t statlock;
	/* MII transceiver section. */
	struct mii_if_info mii_if;
	int mii_preamble_required;
	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used. */
	struct pci_dev *pci_dev;
	void __iomem *base;
};
/* The station address location in the EEPROM. */
#define EEPROM_SA_OFFSET	0x10
#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
			IntrDrvRqst | IntrTxDone | StatsMax | \
			LinkChange)
static int  change_mtu(struct net_device *dev, int new_mtu);
static int  eeprom_read(void __iomem *ioaddr, int location);
static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  mdio_wait_link(struct net_device *dev, int wait);
static int  netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static int  reset_tx (struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void rx_poll(unsigned long data);
static void tx_poll(unsigned long data);
static void refill_rx (struct net_device *dev);
static void netdev_error(struct net_device *dev, int intr_status);
static void set_rx_mode(struct net_device *dev);
static int  __set_mac_addr(struct net_device *dev);
static int  sundance_set_mac_addr(struct net_device *dev, void *data);
static struct net_device_stats *get_stats(struct net_device *dev);
static int  netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int  netdev_close(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;
static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base + ASICCtrl;
	int countdown;

	/* ST201 documentation states ASICCtrl is a 32 bit register */
	iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
	/* ST201 documentation states reset can take up to 1 ms */
	countdown = 10 + 1;
	while (ioread32 (ioaddr) & (ResetBusy << 16)) {
		if (--countdown == 0) {
			printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
			break;
		}
		udelay(100);
	}
}
static const struct net_device_ops netdev_ops = {
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_start_xmit		= start_tx,
	.ndo_get_stats		= get_stats,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_do_ioctl		= netdev_ioctl,
	.ndo_tx_timeout		= tx_timeout,
	.ndo_change_mtu		= change_mtu,
	.ndo_set_mac_address	= sundance_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
static int sundance_probe1(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int card_idx;
	int chip_idx = ent->driver_data;
	int irq;
	int i;
	void __iomem *ioaddr;
	u16 mii_ctl;
	void *ring_space;
	dma_addr_t ring_dma;
#ifdef USE_IO_OPS
	int bar = 0;
#else
	int bar = 1;
#endif
	int phy, phy_end, phy_idx = 0;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	if (pci_enable_device(pdev))
		return -EIO;
	pci_set_master(pdev);

	irq = pdev->irq;

	dev = alloc_etherdev(sizeof(*np));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_request_regions(pdev, DRV_NAME))
		goto err_out_netdev;

	ioaddr = pci_iomap(pdev, bar, netdev_io_size);
	if (!ioaddr)
		goto err_out_res;

	for (i = 0; i < 3; i++)
		((__le16 *)dev->dev_addr)[i] =
			cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));

	np = netdev_priv(dev);
	np->base = ioaddr;
	np->pci_dev = pdev;
	np->chip_id = chip_idx;
	np->msg_enable = (1 << debug) - 1;
	spin_lock_init(&np->lock);
	spin_lock_init(&np->statlock);
	tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
	tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);

	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
			&ring_dma, GFP_KERNEL);
	if (!ring_space)
		goto err_out_cleardev;
	np->tx_ring = (struct netdev_desc *)ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
			&ring_dma, GFP_KERNEL);
	if (!ring_space)
		goto err_out_unmap_tx;
	np->rx_ring = (struct netdev_desc *)ring_space;
	np->rx_ring_dma = ring_dma;

	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->mii_if.phy_id_mask = 0x1f;
	np->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->netdev_ops = &netdev_ops;
	SET_ETHTOOL_OPS(dev, &ethtool_ops);
	dev->watchdog_timeo = TX_TIMEOUT;

	pci_set_drvdata(pdev, dev);

	i = register_netdev(dev);
	if (i)
		goto err_out_unmap_rx;

	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
	       dev->name, pci_id_tbl[chip_idx].name, ioaddr,
	       dev->dev_addr, irq);

	np->phys[0] = 1;		/* Default setting */
	np->mii_preamble_required++;

	/*
	 * It seems some phys doesn't deal well with address 0 being accessed
	 * first
	 */
	if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
		phy = 0;
		phy_end = 31;
	} else {
		phy = 1;
		phy_end = 32;	/* wraps to zero, due to 'phy & 0x1f' */
	}
	for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
		int phyx = phy & 0x1f;
		int mii_status = mdio_read(dev, phyx, MII_BMSR);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			np->phys[phy_idx++] = phyx;
			np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
			if ((mii_status & 0x0040) == 0)
				np->mii_preamble_required++;
			printk(KERN_INFO "%s: MII PHY found at address %d, status "
				   "0x%4.4x advertising %4.4x.\n",
				   dev->name, phyx, mii_status, np->mii_if.advertising);
		}
	}
	np->mii_preamble_required--;

	if (phy_idx == 0) {
		printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
			   dev->name, ioread32(ioaddr + ASICCtrl));
		goto err_out_unregister;
	}

	np->mii_if.phy_id = np->phys[0];

	/* Parse override configuration */
	np->an_enable = 1;
	if (card_idx < MAX_UNITS) {
		if (media[card_idx] != NULL) {
			np->an_enable = 0;
			if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
			    strcmp (media[card_idx], "4") == 0) {
				np->speed = 100;
				np->mii_if.full_duplex = 1;
			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
				   strcmp (media[card_idx], "3") == 0) {
				np->speed = 100;
				np->mii_if.full_duplex = 0;
			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
				   strcmp (media[card_idx], "2") == 0) {
				np->speed = 10;
				np->mii_if.full_duplex = 1;
			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
				   strcmp (media[card_idx], "1") == 0) {
				np->speed = 10;
				np->mii_if.full_duplex = 0;
			} else {
				np->an_enable = 1;
			}
		}
		if (flowctrl == 1)
			np->flowctrl = 1;
	}

	/* Fibre PHY? */
	if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
		/* Default 100Mbps Full */
		if (np->an_enable) {
			np->speed = 100;
			np->mii_if.full_duplex = 1;
			np->an_enable = 0;
		}
	}
	/* Reset PHY */
	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
	mdelay (300);
	/* If flow control enabled, we need to advertise it.*/
	if (np->flowctrl)
		mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
	/* Force media type */
	if (!np->an_enable) {
		mii_ctl = 0;
		mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
		mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
		mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
		printk (KERN_INFO "Override speed=%d, %s duplex\n",
			np->speed, np->mii_if.full_duplex ? "Full" : "Half");
	}

	/* Perhaps move the reset here? */
	/* Reset the chip to erase previous misconfiguration. */
	if (netif_msg_hw(np))
		printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
	sundance_reset(dev, 0x00ff << 16);
	if (netif_msg_hw(np))
		printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));

	card_idx++;
	return 0;

err_out_unregister:
	unregister_netdev(dev);
err_out_unmap_rx:
	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
		np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
		np->tx_ring, np->tx_ring_dma);
err_out_cleardev:
	pci_set_drvdata(pdev, NULL);
	pci_iounmap(pdev, ioaddr);
err_out_res:
	pci_release_regions(pdev);
err_out_netdev:
	free_netdev (dev);
	return -ENODEV;
}
static int change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
		return -EINVAL;
	if (netif_running(dev))
		return -EBUSY;
	dev->mtu = new_mtu;
	return 0;
}
/* The dummy register read acts as a short delay: a PCI read cannot complete
   until earlier posted writes have been flushed. */
#define eeprom_delay(ee_addr)	ioread32(ee_addr)

/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
static int eeprom_read(void __iomem *ioaddr, int location)
{
	int boguscnt = 10000;		/* Typical 1900 ticks. */
	iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
	do {
		eeprom_delay(ioaddr + EECtrl);
		if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
			return ioread16(ioaddr + EEData);
		}
	} while (--boguscnt > 0);
	return 0;
}
/*  MII transceiver control section.
	Read and write the MII registers using software-generated serial
	MDIO protocol.  See the MII specifications or DP83840A data sheet
	for details.

	The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
	met by back-to-back 33 MHz PCI cycles. */
#define mdio_delay() ioread8(mdio_addr)

enum mii_reg_bits {
	MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
};
#define MDIO_EnbIn  (0)
#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
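/* For reference, mdio_read() below builds its command word as
 * (0xf6 << 10) | (phy_id << 5) | location: the transmitted 16 bits carry
 * two trailing preamble ones, the MDIO start (01) and read opcode (10)
 * bits, then the 5-bit PHY and 5-bit register addresses; the turnaround
 * bits are clocked in by the read loop that follows.
 */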
/* Generate the preamble required for initial synchronization and
   a few older transceivers. */
static void mdio_sync(void __iomem *mdio_addr)
{
	int bits = 32;

	/* Establish sync by sending at least 32 logic ones. */
	while (--bits >= 0) {
		iowrite8(MDIO_WRITE1, mdio_addr);
		mdio_delay();
		iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
}
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl;
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	int i, retval = 0;

	if (np->mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite8(dataval, mdio_addr);
		mdio_delay();
		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 19; i > 0; i--) {
		iowrite8(MDIO_EnbIn, mdio_addr);
		mdio_delay();
		retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	return (retval>>1) & 0xffff;
}
static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl;
	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
	int i;

	if (np->mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite8(dataval, mdio_addr);
		mdio_delay();
		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	/* Clear out extra bits. */
	for (i = 2; i > 0; i--) {
		iowrite8(MDIO_EnbIn, mdio_addr);
		mdio_delay();
		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
}
static int mdio_wait_link(struct net_device *dev, int wait)
{
	int bmsr;
	int phy_id;
	struct netdev_private *np;

	np = netdev_priv(dev);
	phy_id = np->phys[0];

	do {
		bmsr = mdio_read(dev, phy_id, MII_BMSR);
		if (bmsr & 0x0004)
			return 0;
		mdelay(1);
	} while (--wait > 0);
	return -1;
}
static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	const int irq = np->pci_dev->irq;
	unsigned long flags;
	int i;

	sundance_reset(dev, 0x00ff << 16);

	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
	if (i)
		return i;

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: netdev_open() irq %d\n", dev->name, irq);

	init_ring(dev);

	iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
	/* The Tx list pointer is written as packets are queued. */

	/* Initialize other registers. */
	__set_mac_addr(dev);
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
#else
	iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
#endif
	if (dev->mtu > 2047)
		iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);

	/* Configure the PCI bus bursts and FIFO thresholds. */

	if (dev->if_port == 0)
		dev->if_port = np->default_port;

	spin_lock_init(&np->mcastlock);

	set_rx_mode(dev);
	iowrite16(0, ioaddr + IntrEnable);
	iowrite16(0, ioaddr + DownCounter);
	/* Set the chip to poll every N*320nsec. */
	iowrite8(100, ioaddr + RxDMAPollPeriod);
	iowrite8(127, ioaddr + TxDMAPollPeriod);
	/* Fix DFE-580TX packet drop issue */
	if (np->pci_dev->revision >= 0x14)
		iowrite8(0x01, ioaddr + DebugCtrl1);
	netif_start_queue(dev);

	spin_lock_irqsave(&np->lock, flags);
	reset_tx(dev);
	spin_unlock_irqrestore(&np->lock, flags);

	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);

	/* Disable Wol */
	iowrite8(ioread8(ioaddr + WakeEvent) | 0x00, ioaddr + WakeEvent);
	np->wol_enabled = 0;

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
			   "MAC Control %x, %4.4x %4.4x.\n",
			   dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
			   ioread32(ioaddr + MACCtrl0),
			   ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = jiffies + 3*HZ;
	np->timer.data = (unsigned long)dev;
	np->timer.function = netdev_timer;		/* timer handler */
	add_timer(&np->timer);

	/* Enable interrupts by setting the interrupt mask. */
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);

	return 0;
}
static void check_duplex(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
	int negotiated = mii_lpa & np->mii_if.advertising;
	int duplex;

	/* Force media */
	if (!np->an_enable || mii_lpa == 0xffff) {
		if (np->mii_if.full_duplex)
			iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
				ioaddr + MACCtrl0);
		return;
	}

	/* Autonegotiation: full duplex if 100FULL was negotiated, or if
	   10FULL is the best mode both ends share. */
	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
	if (np->mii_if.full_duplex != duplex) {
		np->mii_if.full_duplex = duplex;
		if (netif_msg_link(np))
			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
				   "negotiated capability %4.4x.\n", dev->name,
				   duplex ? "full" : "half", np->phys[0], negotiated);
		iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
	}
}
static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int next_tick = 10*HZ;

	if (netif_msg_timer(np)) {
		printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
			   "Tx %x Rx %x.\n",
			   dev->name, ioread16(ioaddr + IntrEnable),
			   ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
	}
	check_duplex(dev);
	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}
static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	unsigned long flag;

	netif_stop_queue(dev);
	tasklet_disable(&np->tx_tasklet);
	iowrite16(0, ioaddr + IntrEnable);
	printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
		   "TxFrameId %2.2x,"
		   " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
		   ioread8(ioaddr + TxFrameId));

	{
		int i;
		for (i=0; i<TX_RING_SIZE; i++) {
			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
				le32_to_cpu(np->tx_ring[i].next_desc),
				le32_to_cpu(np->tx_ring[i].status),
				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				le32_to_cpu(np->tx_ring[i].frag[0].length));
		}
		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
			ioread32(np->base + TxListPtr),
			netif_queue_stopped(dev));
		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
			np->cur_tx, np->cur_tx % TX_RING_SIZE,
			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
	}
	spin_lock_irqsave(&np->lock, flag);

	/* Stop and restart the chip's Tx processes. */
	reset_tx(dev);
	spin_unlock_irqrestore(&np->lock, flag);

	dev->if_port = 0;

	dev->trans_start = jiffies; /* prevent tx timeout */
	dev->stats.tx_errors++;
	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
		netif_wake_queue(dev);
	}
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
	tasklet_enable(&np->tx_tasklet);
}
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->cur_rx = np->cur_tx = 0;
	np->dirty_rx = np->dirty_tx = 0;
	np->cur_task = 0;

	np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
		np->rx_ring[i].status = 0;
		np->rx_ring[i].frag[0].length = 0;
		np->rx_skbuff[i] = NULL;
	}

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb =
			netdev_alloc_skb(dev, np->rx_buf_sz + 2);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		np->rx_ring[i].frag[0].addr = cpu_to_le32(
			dma_map_single(&np->pci_dev->dev, skb->data,
				np->rx_buf_sz, DMA_FROM_DEVICE));
		if (dma_mapping_error(&np->pci_dev->dev,
					np->rx_ring[i].frag[0].addr)) {
			dev_kfree_skb(skb);
			np->rx_skbuff[i] = NULL;
			break;
		}
		np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
	}
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = 0;
	}
}
static void tx_poll (unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	unsigned head = np->cur_task % TX_RING_SIZE;
	struct netdev_desc *txdesc =
		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];

	/* Chain the next pointer */
	for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
		int entry = np->cur_task % TX_RING_SIZE;
		txdesc = &np->tx_ring[entry];
		if (np->last_tx) {
			np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
				entry*sizeof(struct netdev_desc));
		}
		np->last_tx = txdesc;
	}
	/* Indicate the latest descriptor of tx ring */
	txdesc->status |= cpu_to_le32(DescIntrOnTx);

	if (ioread32 (np->base + TxListPtr) == 0)
		iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
			np->base + TxListPtr);
}
static netdev_tx_t
start_tx (struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct netdev_desc *txdesc;
	unsigned entry;

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;
	np->tx_skbuff[entry] = skb;
	txdesc = &np->tx_ring[entry];

	txdesc->next_desc = 0;
	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
	txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
				skb->data, skb->len, DMA_TO_DEVICE));
	if (dma_mapping_error(&np->pci_dev->dev,
				txdesc->frag[0].addr))
		goto drop_frame;
	txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);

	/* Increment cur_tx before tasklet_schedule() */
	np->cur_tx++;
	mb();
	/* Schedule a tx_poll() task */
	tasklet_schedule(&np->tx_tasklet);

	/* On some architectures: explicitly flush cache lines here. */
	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
	    !netif_queue_stopped(dev)) {
		/* do nothing */
	} else {
		netif_stop_queue (dev);
	}
	if (netif_msg_tx_queued(np)) {
		printk (KERN_DEBUG
			"%s: Transmit frame #%d queued in slot %d.\n",
			dev->name, np->cur_tx, entry);
	}
	return NETDEV_TX_OK;

drop_frame:
	dev_kfree_skb_any(skb);
	np->tx_skbuff[entry] = NULL;
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
/* Reset hardware tx and free all of tx buffers */
static int
reset_tx (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	int i;

	/* Reset tx logic, TxListPtr will be cleaned */
	iowrite16 (TxDisable, ioaddr + MACCtrl1);
	sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);

	/* free all tx skbuff */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].next_desc = 0;

		skb = np->tx_skbuff[i];
		if (skb) {
			dma_unmap_single(&np->pci_dev->dev,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				skb->len, DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			np->tx_skbuff[i] = NULL;
			dev->stats.tx_dropped++;
		}
	}
	np->cur_tx = np->dirty_tx = 0;
	np->cur_task = 0;

	np->last_tx = NULL;
	iowrite8(127, ioaddr + TxDMAPollPeriod);

	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
	return 0;
}
/* The interrupt handler cleans up after the Tx thread,
   and schedules an Rx tasklet to do the receive work. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int hw_frame_id;
	int tx_cnt;
	int tx_status;
	int handled = 0;
	int i;

	do {
		int intr_status = ioread16(ioaddr + IntrStatus);
		iowrite16(intr_status, ioaddr + IntrStatus);

		if (netif_msg_intr(np))
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
				   dev->name, intr_status);

		if (!(intr_status & DEFAULT_INTR))
			break;

		handled = 1;

		if (intr_status & (IntrRxDMADone)) {
			iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
					ioaddr + IntrEnable);
			if (np->budget < 0)
				np->budget = RX_BUDGET;
			tasklet_schedule(&np->rx_tasklet);
		}
		if (intr_status & (IntrTxDone | IntrDrvRqst)) {
			tx_status = ioread16 (ioaddr + TxStatus);
			for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
				if (netif_msg_tx_done(np))
					printk
						("%s: Transmit status is %2.2x.\n",
						dev->name, tx_status);
				if (tx_status & 0x1e) {
					if (netif_msg_tx_err(np))
						printk("%s: Transmit error status %4.4x.\n",
							   dev->name, tx_status);
					dev->stats.tx_errors++;
					if (tx_status & 0x10)
						dev->stats.tx_fifo_errors++;
					if (tx_status & 0x08)
						dev->stats.collisions++;
					if (tx_status & 0x04)
						dev->stats.tx_fifo_errors++;
					if (tx_status & 0x02)
						dev->stats.tx_window_errors++;

					/*
					** This reset has been verified on
					** DFE-580TX boards ! phdm@macqel.be.
					*/
					if (tx_status & 0x10) {	/* TxUnderrun */
						/* Restart Tx FIFO and transmitter */
						sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
						/* No need to reset the Tx pointer here */
					}
					/* Restart the Tx. Need to make sure tx enabled */
					i = 10;
					do {
						iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
						if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
							break;
						mdelay(1);
					} while (--i);
				}
				/* Yup, this is a documentation bug.  It cost me *hours*. */
				iowrite16 (0, ioaddr + TxStatus);
				if (tx_cnt < 0) {
					iowrite32(5000, ioaddr + DownCounter);
					break;
				}
				tx_status = ioread16 (ioaddr + TxStatus);
			}
			hw_frame_id = (tx_status >> 8) & 0xff;
		} else {
			hw_frame_id = ioread8(ioaddr + TxFrameId);
		}

		if (np->pci_dev->revision >= 0x14) {
			spin_lock(&np->lock);
			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
				int entry = np->dirty_tx % TX_RING_SIZE;
				struct sk_buff *skb;
				int sw_frame_id;
				sw_frame_id = (le32_to_cpu(
					np->tx_ring[entry].status) >> 2) & 0xff;
				if (sw_frame_id == hw_frame_id &&
					!(le32_to_cpu(np->tx_ring[entry].status)
					& 0x00010000))
						break;
				if (sw_frame_id == (hw_frame_id + 1) %
					TX_RING_SIZE)
						break;
				skb = np->tx_skbuff[entry];
				/* Free the original skb. */
				dma_unmap_single(&np->pci_dev->dev,
					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
					skb->len, DMA_TO_DEVICE);
				dev_kfree_skb_irq (np->tx_skbuff[entry]);
				np->tx_skbuff[entry] = NULL;
				np->tx_ring[entry].frag[0].addr = 0;
				np->tx_ring[entry].frag[0].length = 0;
			}
			spin_unlock(&np->lock);
		} else {
			spin_lock(&np->lock);
			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
				int entry = np->dirty_tx % TX_RING_SIZE;
				struct sk_buff *skb;
				if (!(le32_to_cpu(np->tx_ring[entry].status)
					& 0x00010000))
						break;
				skb = np->tx_skbuff[entry];
				/* Free the original skb. */
				dma_unmap_single(&np->pci_dev->dev,
					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
					skb->len, DMA_TO_DEVICE);
				dev_kfree_skb_irq (np->tx_skbuff[entry]);
				np->tx_skbuff[entry] = NULL;
				np->tx_ring[entry].frag[0].addr = 0;
				np->tx_ring[entry].frag[0].length = 0;
			}
			spin_unlock(&np->lock);
		}

		if (netif_queue_stopped(dev) &&
			np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
			/* The ring is no longer full, clear busy flag. */
			netif_wake_queue (dev);
		}
		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
			netdev_error(dev, intr_status);
	} while (0);
	if (netif_msg_intr(np))
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			   dev->name, ioread16(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}
static void rx_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int boguscnt = np->budget;
	void __iomem *ioaddr = np->base;
	int received = 0;

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (1) {
		struct netdev_desc *desc = &(np->rx_ring[entry]);
		u32 frame_status = le32_to_cpu(desc->status);
		int pkt_len;

		if (--boguscnt < 0) {
			goto not_done;
		}
		if (!(frame_status & DescOwn))
			break;
		pkt_len = frame_status & 0x1fff;	/* Chip omits the CRC. */
		if (netif_msg_rx_status(np))
			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
				   frame_status);
		if (frame_status & 0x001f4000) {
			/* There was an error. */
			if (netif_msg_rx_err(np))
				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
					   frame_status);
			dev->stats.rx_errors++;
			if (frame_status & 0x00100000)
				dev->stats.rx_length_errors++;
			if (frame_status & 0x00010000)
				dev->stats.rx_fifo_errors++;
			if (frame_status & 0x00060000)
				dev->stats.rx_frame_errors++;
			if (frame_status & 0x00080000)
				dev->stats.rx_crc_errors++;
			if (frame_status & 0x00100000) {
				printk(KERN_WARNING "%s: Oversized Ethernet frame,"
					   " status %8.8x.\n",
					   dev->name, frame_status);
			}
		} else {
			struct sk_buff *skb;
#ifndef final_version
			if (netif_msg_rx_status(np))
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
					   ", bogus_cnt %d.\n",
					   pkt_len, boguscnt);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				dma_sync_single_for_cpu(&np->pci_dev->dev,
						le32_to_cpu(desc->frag[0].addr),
						np->rx_buf_sz, DMA_FROM_DEVICE);
				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
				dma_sync_single_for_device(&np->pci_dev->dev,
						le32_to_cpu(desc->frag[0].addr),
						np->rx_buf_sz, DMA_FROM_DEVICE);
				skb_put(skb, pkt_len);
			} else {
				dma_unmap_single(&np->pci_dev->dev,
					le32_to_cpu(desc->frag[0].addr),
					np->rx_buf_sz, DMA_FROM_DEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
			netif_rx(skb);
		}
		entry = (entry + 1) % RX_RING_SIZE;
		received++;
	}
	np->cur_rx = entry;
	refill_rx (dev);
	np->budget -= received;
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
	return;

not_done:
	np->cur_rx = entry;
	refill_rx (dev);
	if (!received)
		received = 1;
	np->budget -= received;
	if (np->budget <= 0)
		np->budget = RX_BUDGET;
	tasklet_schedule(&np->rx_tasklet);
}
static void refill_rx (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry;

	/* Refill the Rx ring buffers. */
	for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;		/* Better luck next round. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
				dma_map_single(&np->pci_dev->dev, skb->data,
					np->rx_buf_sz, DMA_FROM_DEVICE));
			if (dma_mapping_error(&np->pci_dev->dev,
				    np->rx_ring[entry].frag[0].addr)) {
				dev_kfree_skb_irq(skb);
				np->rx_skbuff[entry] = NULL;
				break;
			}
		}
		/* Perhaps we need not reset this field. */
		np->rx_ring[entry].frag[0].length =
			cpu_to_le32(np->rx_buf_sz | LastFrag);
		np->rx_ring[entry].status = 0;
	}
}
static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 mii_ctl, mii_advertise, mii_lpa;
	int speed;

	if (intr_status & LinkChange) {
		if (mdio_wait_link(dev, 10) == 0) {
			printk(KERN_INFO "%s: Link up\n", dev->name);
			if (np->an_enable) {
				mii_advertise = mdio_read(dev, np->phys[0],
							   MII_ADVERTISE);
				mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
				mii_advertise &= mii_lpa;
				printk(KERN_INFO "%s: Link changed: ",
					dev->name);
				if (mii_advertise & ADVERTISE_100FULL) {
					np->speed = 100;
					printk("100Mbps, full duplex\n");
				} else if (mii_advertise & ADVERTISE_100HALF) {
					np->speed = 100;
					printk("100Mbps, half duplex\n");
				} else if (mii_advertise & ADVERTISE_10FULL) {
					np->speed = 10;
					printk("10Mbps, full duplex\n");
				} else if (mii_advertise & ADVERTISE_10HALF) {
					np->speed = 10;
					printk("10Mbps, half duplex\n");
				} else
					printk("\n");
			} else {
				mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
				speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
				np->speed = speed;
				printk(KERN_INFO "%s: Link changed: %dMbps ,",
					dev->name, speed);
				printk("%s duplex.\n",
					(mii_ctl & BMCR_FULLDPLX) ?
						"full" : "half");
			}
			check_duplex(dev);
			if (np->flowctrl && np->mii_if.full_duplex) {
				iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
					ioaddr + MulticastFilter1+2);
				iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
					ioaddr + MACCtrl0);
			}
			netif_carrier_on(dev);
		} else {
			printk(KERN_INFO "%s: Link down\n", dev->name);
			netif_carrier_off(dev);
		}
	}
	if (intr_status & StatsMax) {
		get_stats(dev);
	}
	if (intr_status & IntrPCIErr) {
		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
			   dev->name, intr_status);
		/* We must do a global reset of DMA to continue. */
	}
}
static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	unsigned long flags;
	u8 late_coll, single_coll, mult_coll;

	spin_lock_irqsave(&np->statlock, flags);
	/* The chip only reports frames that were silently dropped. */
	dev->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
	dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
	dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
	dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);

	mult_coll = ioread8(ioaddr + StatsMultiColl);
	np->xstats.tx_multiple_collisions += mult_coll;
	single_coll = ioread8(ioaddr + StatsOneColl);
	np->xstats.tx_single_collisions += single_coll;
	late_coll = ioread8(ioaddr + StatsLateColl);
	np->xstats.tx_late_collisions += late_coll;
	dev->stats.collisions += mult_coll
		+ single_coll
		+ late_coll;

	np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer);
	np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer);
	np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort);
	np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx);
	np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx);
	np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx);
	np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx);

	/* The octet counters are only 16 bits wide; fold the high halves in. */
	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;

	spin_unlock_irqrestore(&np->statlock, flags);

	return &dev->stats;
}
static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 mc_filter[4];			/* Multicast hash filter */
	u32 rx_mode;
	int i;

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	} else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;
		int bit;
		int index;
		int crc;
		memset (mc_filter, 0, sizeof (mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
				if (crc & 0x80000000) index |= 1 << bit;
			mc_filter[index/16] |= (1 << (index % 16));
		}
		rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
	} else {
		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
		return;
	}
	if (np->mii_if.full_duplex && np->flowctrl)
		mc_filter[3] |= 0x0200;

	for (i = 0; i < 4; i++)
		iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
	iowrite8(rx_mode, ioaddr + RxMode);
}
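/* The hash loop above folds the top 6 bits of the little-endian CRC into
 * a 0..63 index: index/16 selects one of the four 16-bit filter words,
 * index%16 the bit within that word.
 */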
static int __set_mac_addr(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u16 addr16;

	addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
	iowrite16(addr16, np->base + StationAddr);
	addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
	iowrite16(addr16, np->base + StationAddr+2);
	addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
	iowrite16(addr16, np->base + StationAddr+4);
	return 0;
}
/* Invoked with rtnl_lock held */
static int sundance_set_mac_addr(struct net_device *dev, void *data)
{
	const struct sockaddr *addr = data;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
	__set_mac_addr(dev);

	return 0;
}
static const struct {
	const char name[ETH_GSTRING_LEN];
} sundance_stats[] = {
	{ "tx_multiple_collisions" },
	{ "tx_single_collisions" },
	{ "tx_late_collisions" },
	{ "tx_deferred" },
	{ "tx_deferred_excessive" },
	{ "tx_aborted" },
	{ "tx_bcasts" },
	{ "rx_bcasts" },
	{ "tx_mcasts" },
	{ "rx_mcasts" },
};
static int check_if_running(struct net_device *dev)
{
	if (!netif_running(dev))
		return -EINVAL;
	return 0;
}
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	mii_ethtool_gset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	return 0;
}
static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int res;
	spin_lock_irq(&np->lock);
	res = mii_ethtool_sset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	return res;
}
static int nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii_if);
}

static u32 get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii_if);
}

static u32 get_msglevel(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return np->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct netdev_private *np = netdev_priv(dev);
	np->msg_enable = val;
}
static void get_strings(struct net_device *dev, u32 stringset,
		u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, sundance_stats, sizeof(sundance_stats));
}
static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(sundance_stats);
	default:
		return -EOPNOTSUPP;
	}
}
static void get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *data)
{
	struct netdev_private *np = netdev_priv(dev);
	int i = 0;

	get_stats(dev);
	data[i++] = np->xstats.tx_multiple_collisions;
	data[i++] = np->xstats.tx_single_collisions;
	data[i++] = np->xstats.tx_late_collisions;
	data[i++] = np->xstats.tx_deferred;
	data[i++] = np->xstats.tx_deferred_excessive;
	data[i++] = np->xstats.tx_aborted;
	data[i++] = np->xstats.tx_bcasts;
	data[i++] = np->xstats.rx_bcasts;
	data[i++] = np->xstats.tx_mcasts;
	data[i++] = np->xstats.rx_mcasts;
}
#ifdef CONFIG_PM

static void sundance_get_wol(struct net_device *dev,
		struct ethtool_wolinfo *wol)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u8 wol_bits;

	wol->wolopts = 0;

	wol->supported = (WAKE_PHY | WAKE_MAGIC);
	if (!np->wol_enabled)
		return;

	wol_bits = ioread8(ioaddr + WakeEvent);
	if (wol_bits & MagicPktEnable)
		wol->wolopts |= WAKE_MAGIC;
	if (wol_bits & LinkEventEnable)
		wol->wolopts |= WAKE_PHY;
}

static int sundance_set_wol(struct net_device *dev,
		struct ethtool_wolinfo *wol)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u8 wol_bits;

	if (!device_can_wakeup(&np->pci_dev->dev))
		return -EOPNOTSUPP;

	np->wol_enabled = !!(wol->wolopts);
	wol_bits = ioread8(ioaddr + WakeEvent);
	wol_bits &= ~(WakePktEnable | MagicPktEnable |
			LinkEventEnable | WolEnable);

	if (np->wol_enabled) {
		if (wol->wolopts & WAKE_MAGIC)
			wol_bits |= (MagicPktEnable | WolEnable);
		if (wol->wolopts & WAKE_PHY)
			wol_bits |= (LinkEventEnable | WolEnable);
	}
	iowrite8(wol_bits, ioaddr + WakeEvent);

	device_set_wakeup_enable(&np->pci_dev->dev, np->wol_enabled);

	return 0;
}
#else
#define sundance_get_wol NULL
#define sundance_set_wol NULL
#endif /* CONFIG_PM */
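/* Hypothetical usage note: when WoL is supported, magic-packet wake can be
 * toggled from userspace with the standard ethtool command, e.g.
 *
 *	ethtool -s eth0 wol g
 *
 * (the interface name here is illustrative).
 */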
static const struct ethtool_ops ethtool_ops = {
	.begin = check_if_running,
	.get_drvinfo = get_drvinfo,
	.get_settings = get_settings,
	.set_settings = set_settings,
	.nway_reset = nway_reset,
	.get_link = get_link,
	.get_wol = sundance_get_wol,
	.set_wol = sundance_set_wol,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_strings = get_strings,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_ethtool_stats,
};
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&np->lock);

	return rc;
}
static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	int i;

	/* Wait and kill tasklet */
	tasklet_kill(&np->rx_tasklet);
	tasklet_kill(&np->tx_tasklet);

	np->cur_tx = 0;
	np->dirty_tx = 0;
	np->cur_task = 0;
	np->last_tx = NULL;

	netif_stop_queue(dev);

	if (netif_msg_ifdown(np)) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
			   "Rx %4.4x Int %2.2x.\n",
			   dev->name, ioread8(ioaddr + TxStatus),
			   ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite16(0x0000, ioaddr + IntrEnable);

	/* Disable Rx and Tx DMA to safely release resources */
	iowrite32(0x500, ioaddr + DMACtrl);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);

	for (i = 2000; i > 0; i--) {
		if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
			break;
		mdelay(1);
	}

	iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
			ioaddr + ASIC_HI_WORD(ASICCtrl));

	for (i = 2000; i > 0; i--) {
		if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
			break;
		mdelay(1);
	}

#ifdef __i386__
	if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "  Tx ring at %8.8x:\n",
			   (int)(np->tx_ring_dma));
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
				   i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
				   np->tx_ring[i].frag[0].length);
		printk(KERN_DEBUG "  Rx ring %8.8x:\n",
			   (int)(np->rx_ring_dma));
		for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
				   i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
				   np->rx_ring[i].frag[0].length);
		}
	}
#endif /* __i386__ debugging only */

	free_irq(np->pci_dev->irq, dev);

	del_timer_sync(&np->timer);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		skb = np->rx_skbuff[i];
		if (skb) {
			dma_unmap_single(&np->pci_dev->dev,
				le32_to_cpu(np->rx_ring[i].frag[0].addr),
				np->rx_buf_sz, DMA_FROM_DEVICE);
			dev_kfree_skb(skb);
			np->rx_skbuff[i] = NULL;
		}
		np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].next_desc = 0;
		skb = np->tx_skbuff[i];
		if (skb) {
			dma_unmap_single(&np->pci_dev->dev,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				skb->len, DMA_TO_DEVICE);
			dev_kfree_skb(skb);
			np->tx_skbuff[i] = NULL;
		}
	}

	return 0;
}
static void sundance_remove1(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);
		unregister_netdev(dev);
		dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
			np->rx_ring, np->rx_ring_dma);
		dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
			np->tx_ring, np->tx_ring_dma);
		pci_iounmap(pdev, np->base);
		pci_release_regions(pdev);
		free_netdev(dev);
		pci_set_drvdata(pdev, NULL);
	}
}
#ifdef CONFIG_PM

static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;

	if (!netif_running(dev))
		return 0;

	netdev_close(dev);
	netif_device_detach(dev);

	pci_save_state(pci_dev);
	if (np->wol_enabled) {
		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
		iowrite16(RxEnable, ioaddr + MACCtrl1);
	}
	pci_enable_wake(pci_dev, pci_choose_state(pci_dev, state),
			np->wol_enabled);
	pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));

	return 0;
}

static int sundance_resume(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	int err = 0;

	if (!netif_running(dev))
		return 0;

	pci_set_power_state(pci_dev, PCI_D0);
	pci_restore_state(pci_dev);
	pci_enable_wake(pci_dev, PCI_D0, 0);

	err = netdev_open(dev);
	if (err) {
		printk(KERN_ERR "%s: Can't resume interface!\n",
				dev->name);
		goto out;
	}

	netif_device_attach(dev);

out:
	return err;
}

#endif /* CONFIG_PM */
static struct pci_driver sundance_driver = {
	.name		= DRV_NAME,
	.id_table	= sundance_pci_tbl,
	.probe		= sundance_probe1,
	.remove		= sundance_remove1,
#ifdef CONFIG_PM
	.suspend	= sundance_suspend,
	.resume		= sundance_resume,
#endif /* CONFIG_PM */
};

static int __init sundance_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif
	return pci_register_driver(&sundance_driver);
}

static void __exit sundance_exit(void)
{
	pci_unregister_driver(&sundance_driver);
}

module_init(sundance_init);
module_exit(sundance_exit);