1 /* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */
3 Written/copyright 1997-2001 by Donald Becker.
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
12 This driver is for the SMC83c170/175 "EPIC" series, as used on the
13 SMC EtherPower II 9432 PCI adapter, and several CardBus cards.
15 The author may be reached as becker@scyld.com, or C/O
16 Scyld Computing Corporation
17 410 Severn Ave., Suite 210
20 Information and updates available at
21 http://www.scyld.com/network/epic100.html
22 [this link no longer provides anything useful -jgarzik]
24 ---------------------------------------------------------------------
/* Driver identity strings, used in the version banner and PCI region name. */
28 #define DRV_NAME "epic100"
29 #define DRV_VERSION "2.0"
30 #define DRV_RELDATE "June 27, 2006"
32 /* The user-configurable values.
33 These may be modified when a driver module is loaded.*/
35 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
37 /* Used to pass the full-duplex flag, etc. */
38 #define MAX_UNITS 8 /* More are supported, limit only on options */
39 static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
40 static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
42 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
43 Setting to > 1518 effectively disables this feature. */
44 static int rx_copybreak;
46 /* Operational parameters that are set at compile time. */
48 /* Keep the ring sizes a power of two for operational efficiency.
49 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
50 Making the Tx ring too large decreases the effectiveness of channel
51 bonding and packet priority.
52 There are no ill effects from too-large receive rings. */
53 #define TX_RING_SIZE 256
54 #define TX_QUEUE_LEN 240 /* Limit ring entries actually used. */
55 #define RX_RING_SIZE 256
/* Byte sizes of the DMA-coherent descriptor rings allocated in epic_init_one(). */
56 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct epic_tx_desc)
57 #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct epic_rx_desc)
59 /* Operational parameters that usually are not changed. */
60 /* Time in jiffies before concluding the transmitter is hung. */
61 #define TX_TIMEOUT (2*HZ)
63 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
65 /* Bytes transferred to chip before transmission starts. */
66 /* Initial threshold, increased on underflow, rounded down to 4 byte units. */
67 #define TX_FIFO_THRESH 256
68 #define RX_FIFO_THRESH 1 /* 0-3, 0==32, 64,96, or 3==128 bytes */
70 #include <linux/module.h>
71 #include <linux/kernel.h>
72 #include <linux/string.h>
73 #include <linux/timer.h>
74 #include <linux/errno.h>
75 #include <linux/ioport.h>
76 #include <linux/slab.h>
77 #include <linux/interrupt.h>
78 #include <linux/pci.h>
79 #include <linux/delay.h>
80 #include <linux/netdevice.h>
81 #include <linux/etherdevice.h>
82 #include <linux/skbuff.h>
83 #include <linux/init.h>
84 #include <linux/spinlock.h>
85 #include <linux/ethtool.h>
86 #include <linux/mii.h>
87 #include <linux/crc32.h>
88 #include <linux/bitops.h>
90 #include <asm/uaccess.h>
92 /* These identify the driver base version and may not be removed. */
93 static char version[] __devinitdata =
94 DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>\n";
95 static char version2[] __devinitdata =
96 " http://www.scyld.com/network/epic100.html\n";
97 static char version3[] __devinitdata =
98 " (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
100 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
101 MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
102 MODULE_LICENSE("GPL");
/* Module parameters: per-card 'options'/'full_duplex' are indexed by probe order
   (see card_idx in epic_init_one); all default to -1 / 0, i.e. "not set". */
104 module_param(debug, int, 0);
105 module_param(rx_copybreak, int, 0);
106 module_param_array(options, int, NULL, 0);
107 module_param_array(full_duplex, int, NULL, 0);
108 MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)");
109 MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
110 MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
111 MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)");
116 I. Board Compatibility
118 This device driver is designed for the SMC "EPIC/100", the SMC
119 single-chip Ethernet controllers for PCI. This chip is used on
120 the SMC EtherPower II boards.
122 II. Board-specific settings
124 PCI bus devices are configured by the system at boot time, so no jumpers
125 need to be set on the board. The system BIOS will assign the
126 PCI INTA signal to a (preferably otherwise unused) system IRQ line.
127 Note: Kernel versions earlier than 1.3.73 do not support shared PCI
130 III. Driver operation
136 http://www.smsc.com/main/datasheets/83c171.pdf
137 http://www.smsc.com/main/datasheets/83c175.pdf
138 http://scyld.com/expert/NWay.html
139 http://www.national.com/pf/DP/DP83840A.html
/* Per-chip capability bits stored in epic_chip_info.drv_flags. */
146 enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };
/* Size of the chip's register window; probe checks BAR 0 is at least this big. */
148 #define EPIC_TOTAL_SIZE 0x100
158 struct epic_chip_info {
160 int drv_flags; /* Driver use, intended as capability flags. */
164 /* indexed by chip_t */
165 static const struct epic_chip_info pci_id_tbl[] = {
166 { "SMSC EPIC/100 83c170", TYPE2_INTR | NO_MII | MII_PWRDWN },
167 { "SMSC EPIC/100 83c170", TYPE2_INTR },
168 { "SMSC EPIC/C 83c175", TYPE2_INTR | MII_PWRDWN },
/* PCI match table; driver_data is the index into pci_id_tbl above. */
172 static struct pci_device_id epic_pci_tbl[] = {
173 { 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
174 { 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
175 { 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
176 PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, SMSC_83C175 },
179 MODULE_DEVICE_TABLE (pci, epic_pci_tbl);
197 /* Offsets to registers, using the (ugh) SMC names. */
198 enum epic_registers {
199 COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
201 TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28, /* Rx error counters. */
202 MIICtrl=0x30, MIIData=0x34, MIICfg=0x38,
203 LAN0=64, /* MAC address. */
204 MC0=80, /* Multicast filter table. */
205 RxCtrl=96, TxCtrl=112, TxSTAT=0x74,
206 PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC,
209 /* Interrupt register bits, using my own meaningful names. */
211 TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
212 PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
213 RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
214 TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
215 RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
/* COMMAND register bits. */
218 StopRx=1, StartRx=2, TxQueued=4, RxQueued=8,
219 StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
222 #define EpicRemoved 0xffffffff /* Chip failed or removed (CardBus) */
/* Events serviced in the NAPI poll path vs. directly in the hard IRQ handler. */
224 #define EpicNapiEvent (TxEmpty | TxDone | \
225 RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull)
226 #define EpicNormalEvent (0x0000ffff & ~EpicNapiEvent)
/* Map the low 4 bits of dev->if_port to an MII BMCR value (0 == autonegotiate). */
228 static const u16 media2miictl[16] = {
229 0, 0x0C00, 0x0C00, 0x2000, 0x0100, 0x2100, 0, 0,
230 0, 0, 0, 0, 0, 0, 0, 0 };
232 /* The EPIC100 Rx and Tx buffer descriptors. */
234 struct epic_tx_desc {
241 struct epic_rx_desc {
248 enum desc_status_bits {
252 #define PRIV_ALIGN 15 /* Required alignment mask */
/* Per-device driver state, hung off dev->priv. */
253 struct epic_private {
254 struct epic_rx_desc *rx_ring;
255 struct epic_tx_desc *tx_ring;
256 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
257 struct sk_buff* tx_skbuff[TX_RING_SIZE];
258 /* The addresses of receive-in-place skbuffs. */
259 struct sk_buff* rx_skbuff[RX_RING_SIZE];
261 dma_addr_t tx_ring_dma;
262 dma_addr_t rx_ring_dma;
265 spinlock_t lock; /* Group with Tx control cache line. */
266 spinlock_t napi_lock;
267 unsigned int reschedule_in_poll;
/* Ring indices: cur_* is the producer cursor, dirty_* the consumer cursor;
   both increase monotonically and are reduced mod ring size at use sites. */
268 unsigned int cur_tx, dirty_tx;
270 unsigned int cur_rx, dirty_rx;
272 unsigned int rx_buf_sz; /* Based on MTU+slack. */
274 struct pci_dev *pci_dev; /* PCI bus location. */
275 int chip_id, chip_flags;
277 struct net_device_stats stats;
278 struct timer_list timer; /* Media selection timer. */
280 unsigned char mc_filter[8];
281 signed char phys[4]; /* MII device addresses. */
282 u16 advertising; /* NWay media advertisement */
284 struct mii_if_info mii;
285 unsigned int tx_full:1; /* The Tx queue is full. */
286 unsigned int default_port:4; /* Last dev->if_port value. */
/* Forward declarations for the net_device methods and helpers defined below. */
289 static int epic_open(struct net_device *dev);
290 static int read_eeprom(long ioaddr, int location);
291 static int mdio_read(struct net_device *dev, int phy_id, int location);
292 static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
293 static void epic_restart(struct net_device *dev);
294 static void epic_timer(unsigned long data);
295 static void epic_tx_timeout(struct net_device *dev);
296 static void epic_init_ring(struct net_device *dev);
297 static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev);
298 static int epic_rx(struct net_device *dev, int budget);
299 static int epic_poll(struct net_device *dev, int *budget);
300 static irqreturn_t epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
301 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
302 static struct ethtool_ops netdev_ethtool_ops;
303 static int epic_close(struct net_device *dev);
304 static struct net_device_stats *epic_get_stats(struct net_device *dev);
305 static void set_rx_mode(struct net_device *dev);
/* PCI probe: enable the device, map registers, allocate the net_device and
   DMA descriptor rings, read the MAC address, scan for MII PHYs, and register
   the interface.  Returns 0 on success; error paths unwind via the goto
   labels at the bottom. */
309 static int __devinit epic_init_one (struct pci_dev *pdev,
310 const struct pci_device_id *ent)
312 static int card_idx = -1;
314 int chip_idx = (int) ent->driver_data;
316 struct net_device *dev;
317 struct epic_private *ep;
318 int i, ret, option = 0, duplex = 0;
322 /* when built into the kernel, we only print version if device is found */
324 static int printed_version;
325 if (!printed_version++)
326 printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
327 version, version2, version3);
332 ret = pci_enable_device(pdev);
337 if (pci_resource_len(pdev, 0) < EPIC_TOTAL_SIZE) {
338 dev_printk(KERN_ERR, &pdev->dev, "no PCI region space\n");
340 goto err_out_disable;
343 pci_set_master(pdev);
345 ret = pci_request_regions(pdev, DRV_NAME);
347 goto err_out_disable;
351 dev = alloc_etherdev(sizeof (*ep));
353 dev_printk(KERN_ERR, &pdev->dev, "no memory for eth device\n");
354 goto err_out_free_res;
356 SET_MODULE_OWNER(dev);
357 SET_NETDEV_DEV(dev, &pdev->dev);
/* BAR 0 is I/O space, BAR 1 the memory-mapped alternative (ioremap'd). */
360 ioaddr = pci_resource_start (pdev, 0);
362 ioaddr = pci_resource_start (pdev, 1);
363 ioaddr = (long) ioremap (ioaddr, pci_resource_len (pdev, 1));
365 dev_printk(KERN_ERR, &pdev->dev, "ioremap failed\n");
366 goto err_out_free_netdev;
370 pci_set_drvdata(pdev, dev);
373 ep->mii.mdio_read = mdio_read;
374 ep->mii.mdio_write = mdio_write;
375 ep->mii.phy_id_mask = 0x1f;
376 ep->mii.reg_num_mask = 0x1f;
/* DMA-coherent descriptor rings; Tx first, then Rx. */
378 ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
380 goto err_out_iounmap;
381 ep->tx_ring = (struct epic_tx_desc *)ring_space;
382 ep->tx_ring_dma = ring_dma;
384 ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
386 goto err_out_unmap_tx;
387 ep->rx_ring = (struct epic_rx_desc *)ring_space;
388 ep->rx_ring_dma = ring_dma;
/* Media override: dev->mem_start (e.g. from LILO 'ether=') beats the
   per-card module parameters. */
390 if (dev->mem_start) {
391 option = dev->mem_start;
392 duplex = (dev->mem_start & 16) ? 1 : 0;
393 } else if (card_idx >= 0 && card_idx < MAX_UNITS) {
394 if (options[card_idx] >= 0)
395 option = options[card_idx];
396 if (full_duplex[card_idx] >= 0)
397 duplex = full_duplex[card_idx];
400 dev->base_addr = ioaddr;
403 spin_lock_init(&ep->lock);
404 spin_lock_init(&ep->napi_lock);
405 ep->reschedule_in_poll = 0;
407 /* Bring the chip out of low-power mode. */
408 outl(0x4200, ioaddr + GENCTL);
409 /* Magic?! If we don't set this bit the MII interface won't work. */
410 /* This magic is documented in SMSC app note 7.15 */
411 for (i = 16; i > 0; i--)
412 outl(0x0008, ioaddr + TEST1);
414 /* Turn on the MII transceiver. */
415 outl(0x12, ioaddr + MIICfg);
417 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
418 outl(0x0200, ioaddr + GENCTL);
420 /* Note: the '175 does not have a serial EEPROM. */
/* Read the station address from the LAN0 registers, 16 bits at a time. */
421 for (i = 0; i < 3; i++)
422 ((u16 *)dev->dev_addr)[i] = le16_to_cpu(inw(ioaddr + LAN0 + i*4));
425 dev_printk(KERN_DEBUG, &pdev->dev, "EEPROM contents:\n");
426 for (i = 0; i < 64; i++)
427 printk(" %4.4x%s", read_eeprom(ioaddr, i),
428 i % 16 == 15 ? "\n" : "");
432 ep->chip_id = chip_idx;
433 ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
435 (ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
436 | CntFull | TxUnderrun | EpicNapiEvent;
438 /* Find the connected MII xcvrs.
439 Doing this in open() would allow detecting external xcvrs later, but
440 takes much time and no cards have external MII. */
442 int phy, phy_idx = 0;
443 for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
444 int mii_status = mdio_read(dev, phy, MII_BMSR);
/* 0x0000/0xffff means no PHY responded at this address. */
445 if (mii_status != 0xffff && mii_status != 0x0000) {
446 ep->phys[phy_idx++] = phy;
447 dev_printk(KERN_INFO, &pdev->dev,
448 "MII transceiver #%d control "
449 "%4.4x status %4.4x.\n",
450 phy, mdio_read(dev, phy, 0), mii_status);
453 ep->mii_phy_cnt = phy_idx;
456 ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE);
457 dev_printk(KERN_INFO, &pdev->dev,
458 "Autonegotiation advertising %4.4x link "
460 ep->mii.advertising, mdio_read(dev, phy, 5));
461 } else if ( ! (ep->chip_flags & NO_MII)) {
462 dev_printk(KERN_WARNING, &pdev->dev,
463 "***WARNING***: No MII transceiver found!\n");
464 /* Use the known PHY address of the EPII. */
467 ep->mii.phy_id = ep->phys[0];
470 /* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
471 if (ep->chip_flags & MII_PWRDWN)
472 outl(inl(ioaddr + NVCTL) & ~0x483C, ioaddr + NVCTL);
473 outl(0x0008, ioaddr + GENCTL);
475 /* The lower four bits are the media type. */
477 ep->mii.force_media = ep->mii.full_duplex = 1;
478 dev_printk(KERN_INFO, &pdev->dev,
479 "Forced full duplex operation requested.\n");
481 dev->if_port = ep->default_port = option;
483 /* The Epic-specific entries in the device structure. */
484 dev->open = &epic_open;
485 dev->hard_start_xmit = &epic_start_xmit;
486 dev->stop = &epic_close;
487 dev->get_stats = &epic_get_stats;
488 dev->set_multicast_list = &set_rx_mode;
489 dev->do_ioctl = &netdev_ioctl;
490 dev->ethtool_ops = &netdev_ethtool_ops;
491 dev->watchdog_timeo = TX_TIMEOUT;
492 dev->tx_timeout = &epic_tx_timeout;
493 dev->poll = epic_poll;
496 ret = register_netdev(dev);
498 goto err_out_unmap_rx;
500 printk(KERN_INFO "%s: %s at %#lx, IRQ %d, ",
501 dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq);
502 for (i = 0; i < 5; i++)
503 printk("%2.2x:", dev->dev_addr[i]);
504 printk("%2.2x.\n", dev->dev_addr[i]);
/* Error unwind: release resources in reverse order of acquisition. */
510 pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
512 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
520 pci_release_regions(pdev);
522 pci_disable_device(pdev);
526 /* Serial EEPROM section. */
528 /* EEPROM_Ctrl bits. */
529 #define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
530 #define EE_CS 0x02 /* EEPROM chip select. */
531 #define EE_DATA_WRITE 0x08 /* EEPROM chip data in. */
532 #define EE_WRITE_0 0x01
533 #define EE_WRITE_1 0x09
534 #define EE_DATA_READ 0x10 /* EEPROM chip data out. */
535 #define EE_ENB (0x0001 | EE_CS)
537 /* Delay between EEPROM clock transitions.
538 This serves to flush the operation to the PCI bus.
541 #define eeprom_delay() inl(ee_addr)
543 /* The EEPROM commands include the always-set leading bit. */
544 #define EE_WRITE_CMD (5 << 6)
545 #define EE_READ64_CMD (6 << 6)
546 #define EE_READ256_CMD (6 << 8)
547 #define EE_ERASE_CMD (7 << 6)
/* Mask off every chip interrupt source by clearing INTMASK. */
549 static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
551 long ioaddr = dev->base_addr;
553 outl(0x00000000, ioaddr + INTMASK);
/* Flush a posted PCI write by reading back a register (INTMASK here). */
556 static inline void __epic_pci_commit(long ioaddr)
559 inl(ioaddr + INTMASK);
/* Disable only the NAPI-handled interrupt sources, then flush the posted
   write so the mask takes effect before the caller proceeds. */
563 static inline void epic_napi_irq_off(struct net_device *dev,
564 struct epic_private *ep)
566 long ioaddr = dev->base_addr;
568 outl(ep->irq_mask & ~EpicNapiEvent, ioaddr + INTMASK);
569 __epic_pci_commit(ioaddr);
/* Re-enable the NAPI-handled interrupt sources in INTMASK. */
572 static inline void epic_napi_irq_on(struct net_device *dev,
573 struct epic_private *ep)
575 long ioaddr = dev->base_addr;
577 /* No need to commit possible posted write */
578 outl(ep->irq_mask | EpicNapiEvent, ioaddr + INTMASK);
/* Bit-bang one 16-bit word out of the serial EEPROM at 'location'.
   Bit 0x40 of EECTL selects between the 64-word and 256-word command
   formats.  Only used at probe time (__devinit). */
581 static int __devinit read_eeprom(long ioaddr, int location)
585 long ee_addr = ioaddr + EECTL;
586 int read_cmd = location |
587 (inl(ee_addr) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);
589 outl(EE_ENB & ~EE_CS, ee_addr);
590 outl(EE_ENB, ee_addr);
592 /* Shift the read command bits out. */
593 for (i = 12; i >= 0; i--) {
594 short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
595 outl(EE_ENB | dataval, ee_addr);
597 outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
600 outl(EE_ENB, ee_addr);
/* Clock in the 16 data bits, MSB first. */
602 for (i = 16; i > 0; i--) {
603 outl(EE_ENB | EE_SHIFT_CLK, ee_addr);
605 retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
606 outl(EE_ENB, ee_addr);
610 /* Terminate the EEPROM access. */
611 outl(EE_ENB & ~EE_CS, ee_addr);
616 #define MII_WRITEOP 2
/* Read an MII register via the chip's MIICtrl/MIIData interface, polling
   until the read-op bit clears.  Includes a documented retry for a chip
   bug where the first read of some PHY-1 registers returns 0xffff. */
617 static int mdio_read(struct net_device *dev, int phy_id, int location)
619 long ioaddr = dev->base_addr;
620 int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
623 outl(read_cmd, ioaddr + MIICtrl);
624 /* Typical operation takes 25 loops. */
625 for (i = 400; i > 0; i--) {
627 if ((inl(ioaddr + MIICtrl) & MII_READOP) == 0) {
628 /* Work around read failure bug. */
629 if (phy_id == 1 && location < 6
630 && inw(ioaddr + MIIData) == 0xffff) {
631 outl(read_cmd, ioaddr + MIICtrl);
634 return inw(ioaddr + MIIData);
/* Write 'value' to an MII register, then poll until the write-op bit clears. */
640 static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
642 long ioaddr = dev->base_addr;
645 outw(value, ioaddr + MIIData);
646 outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl);
647 for (i = 10000; i > 0; i--) {
649 if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0)
/* net_device open(): soft-reset the chip, request the (shared) IRQ, wake
   the chip from low-power mode, program the MAC address and ring base
   addresses, negotiate/force duplex, start Rx, unmask interrupts, and arm
   the media-check timer. */
656 static int epic_open(struct net_device *dev)
658 struct epic_private *ep = dev->priv;
659 long ioaddr = dev->base_addr;
663 /* Soft reset the chip. */
664 outl(0x4001, ioaddr + GENCTL);
666 if ((retval = request_irq(dev->irq, &epic_interrupt, IRQF_SHARED, dev->name, dev)))
671 outl(0x4000, ioaddr + GENCTL);
672 /* This magic is documented in SMSC app note 7.15 */
673 for (i = 16; i > 0; i--)
674 outl(0x0008, ioaddr + TEST1);
676 /* Pull the chip out of low-power mode, enable interrupts, and set for
677 PCI read multiple. The MIIcfg setting and strange write order are
678 required by the details of which bits are reset and the transceiver
679 wiring on the Ositech CardBus card.
682 outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
684 if (ep->chip_flags & MII_PWRDWN)
685 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
/* GENCTL descriptor-endianness setup differs on big-endian hosts. */
687 #if defined(__powerpc__) || defined(__sparc__) /* Big endian */
688 outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
689 inl(ioaddr + GENCTL);
690 outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
692 outl(0x4412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
693 inl(ioaddr + GENCTL);
694 outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
697 udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */
699 for (i = 0; i < 3; i++)
700 outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
702 ep->tx_threshold = TX_FIFO_THRESH;
703 outl(ep->tx_threshold, ioaddr + TxThresh);
/* Non-zero table entry means a forced-media BMCR value for this if_port. */
705 if (media2miictl[dev->if_port & 15]) {
707 mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
708 if (dev->if_port == 1) {
710 printk(KERN_INFO "%s: Using the 10base2 transceiver, MII "
712 dev->name, mdio_read(dev, ep->phys[0], MII_BMSR));
715 int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
716 if (mii_lpa != 0xffff) {
717 if ((mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == LPA_10FULL)
718 ep->mii.full_duplex = 1;
719 else if (! (mii_lpa & LPA_LPACK))
720 mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
722 printk(KERN_INFO "%s: Setting %s-duplex based on MII xcvr %d"
723 " register read of %4.4x.\n", dev->name,
724 ep->mii.full_duplex ? "full" : "half",
725 ep->phys[0], mii_lpa);
729 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
730 outl(ep->rx_ring_dma, ioaddr + PRxCDAR);
731 outl(ep->tx_ring_dma, ioaddr + PTxCDAR);
733 /* Start the chip's Rx process. */
735 outl(StartRx | RxQueued, ioaddr + COMMAND);
737 netif_start_queue(dev);
739 /* Enable interrupts by setting the interrupt mask. */
740 outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
741 | CntFull | TxUnderrun
742 | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);
745 printk(KERN_DEBUG "%s: epic_open() ioaddr %lx IRQ %d status %4.4x "
747 dev->name, ioaddr, dev->irq, (int)inl(ioaddr + GENCTL),
748 ep->mii.full_duplex ? "full" : "half");
750 /* Set the timer to switch to check for link beat and perhaps switch
751 to an alternate media type. */
752 init_timer(&ep->timer);
753 ep->timer.expires = jiffies + 3*HZ;
754 ep->timer.data = (unsigned long)dev;
755 ep->timer.function = &epic_timer; /* timer handler */
756 add_timer(&ep->timer);
761 /* Reset the chip to recover from a PCI transaction error.
762 This may occur at interrupt time. */
/* NOTE(review): the comment above appears to describe epic_restart() below;
   epic_pause() itself quiesces the chip: stop the queue, mask interrupts,
   halt Tx/Rx DMA, harvest the hardware error counters, and drain the Rx
   ring.  Confirm against the full file. */
763 static void epic_pause(struct net_device *dev)
765 long ioaddr = dev->base_addr;
766 struct epic_private *ep = dev->priv;
768 netif_stop_queue (dev);
770 /* Disable interrupts by clearing the interrupt mask. */
771 outl(0x00000000, ioaddr + INTMASK);
772 /* Stop the chip's Tx and Rx DMA processes. */
773 outw(StopRx | StopTxDMA | StopRxDMA, ioaddr + COMMAND);
775 /* Update the error counts. */
/* 0xffff from COMMAND means the chip is gone (e.g. CardBus eject). */
776 if (inw(ioaddr + COMMAND) != 0xffff) {
777 ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
778 ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
779 ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
782 /* Remove the packets on the Rx queue. */
783 epic_rx(dev, RX_RING_SIZE);
/* Re-initialize the chip after a soft reset, preserving the current ring
   positions (cur_rx / dirty_tx) so in-flight descriptors are resumed rather
   than restarted from slot 0. */
786 static void epic_restart(struct net_device *dev)
788 long ioaddr = dev->base_addr;
789 struct epic_private *ep = dev->priv;
792 /* Soft reset the chip. */
793 outl(0x4001, ioaddr + GENCTL);
795 printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
796 dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
799 /* This magic is documented in SMSC app note 7.15 */
800 for (i = 16; i > 0; i--)
801 outl(0x0008, ioaddr + TEST1);
803 #if defined(__powerpc__) || defined(__sparc__) /* Big endian */
804 outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
806 outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
808 outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
809 if (ep->chip_flags & MII_PWRDWN)
810 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
812 for (i = 0; i < 3; i++)
813 outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
815 ep->tx_threshold = TX_FIFO_THRESH;
816 outl(ep->tx_threshold, ioaddr + TxThresh);
817 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
/* Point the chip at the descriptor the driver is currently working on. */
818 outl(ep->rx_ring_dma + (ep->cur_rx%RX_RING_SIZE)*
819 sizeof(struct epic_rx_desc), ioaddr + PRxCDAR);
820 outl(ep->tx_ring_dma + (ep->dirty_tx%TX_RING_SIZE)*
821 sizeof(struct epic_tx_desc), ioaddr + PTxCDAR);
823 /* Start the chip's Rx process. */
825 outl(StartRx | RxQueued, ioaddr + COMMAND);
827 /* Enable interrupts by setting the interrupt mask. */
828 outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
829 | CntFull | TxUnderrun
830 | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);
832 printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
833 " interrupt %4.4x.\n",
834 dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL),
835 (int)inl(ioaddr + INTSTAT));
/* Re-read the link partner ability and, if the negotiated duplex changed,
   reprogram TxCtrl accordingly.  No-op when media is forced or the MII
   read is bogus (0xffff). */
839 static void check_media(struct net_device *dev)
841 struct epic_private *ep = dev->priv;
842 long ioaddr = dev->base_addr;
843 int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
844 int negotiated = mii_lpa & ep->mii.advertising;
/* Full duplex if 100FULL (0x0100) negotiated, or 10FULL won the 0x01C0 field. */
845 int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
847 if (ep->mii.force_media)
849 if (mii_lpa == 0xffff) /* Bogus read */
851 if (ep->mii.full_duplex != duplex) {
852 ep->mii.full_duplex = duplex;
853 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
854 " partner capability of %4.4x.\n", dev->name,
855 ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa);
856 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
/* Periodic media-monitor timer (armed in epic_open); dumps chip status when
   debugging and re-arms itself every next_tick jiffies. */
860 static void epic_timer(unsigned long data)
862 struct net_device *dev = (struct net_device *)data;
863 struct epic_private *ep = dev->priv;
864 long ioaddr = dev->base_addr;
865 int next_tick = 5*HZ;
868 printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",
869 dev->name, (int)inl(ioaddr + TxSTAT));
870 printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "
871 "IntStatus %4.4x RxStatus %4.4x.\n",
872 dev->name, (int)inl(ioaddr + INTMASK),
873 (int)inl(ioaddr + INTSTAT), (int)inl(ioaddr + RxSTAT));
878 ep->timer.expires = jiffies + next_tick;
879 add_timer(&ep->timer);
/* Watchdog callback (dev->tx_timeout, fires after TX_TIMEOUT jiffies with
   no Tx progress): log state, recover from a Tx FIFO underflow if that is
   the cause, kick the transmitter, and wake the queue. */
882 static void epic_tx_timeout(struct net_device *dev)
884 struct epic_private *ep = dev->priv;
885 long ioaddr = dev->base_addr;
888 printk(KERN_WARNING "%s: Transmit timeout using MII device, "
889 "Tx status %4.4x.\n",
890 dev->name, (int)inw(ioaddr + TxSTAT));
892 printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
893 dev->name, ep->dirty_tx, ep->cur_tx);
896 if (inw(ioaddr + TxSTAT) & 0x10) { /* Tx FIFO underflow. */
897 ep->stats.tx_fifo_errors++;
898 outl(RestartTx, ioaddr + COMMAND);
901 outl(TxQueued, dev->base_addr + COMMAND);
904 dev->trans_start = jiffies;
905 ep->stats.tx_errors++;
907 netif_wake_queue(dev);
910 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
911 static void epic_init_ring(struct net_device *dev)
913 struct epic_private *ep = dev->priv;
917 ep->dirty_tx = ep->cur_tx = 0;
918 ep->cur_rx = ep->dirty_rx = 0;
919 ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
921 /* Initialize all Rx descriptors. */
922 for (i = 0; i < RX_RING_SIZE; i++) {
923 ep->rx_ring[i].rxstatus = 0;
924 ep->rx_ring[i].buflength = cpu_to_le32(ep->rx_buf_sz);
925 ep->rx_ring[i].next = ep->rx_ring_dma +
926 (i+1)*sizeof(struct epic_rx_desc);
927 ep->rx_skbuff[i] = NULL;
929 /* Mark the last entry as wrapping the ring. */
930 ep->rx_ring[i-1].next = ep->rx_ring_dma;
932 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
933 for (i = 0; i < RX_RING_SIZE; i++) {
934 struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz);
935 ep->rx_skbuff[i] = skb;
938 skb->dev = dev; /* Mark as being used by this device. */
939 skb_reserve(skb, 2); /* 16 byte align the IP header. */
940 ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
941 skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
/* Hand the descriptor to the chip only after the buffer is mapped. */
942 ep->rx_ring[i].rxstatus = cpu_to_le32(DescOwn);
944 ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
946 /* The Tx buffer descriptor is filled in as needed, but we
947 do need to clear the ownership bit. */
948 for (i = 0; i < TX_RING_SIZE; i++) {
949 ep->tx_skbuff[i] = NULL;
950 ep->tx_ring[i].txstatus = 0x0000;
951 ep->tx_ring[i].next = ep->tx_ring_dma +
952 (i+1)*sizeof(struct epic_tx_desc);
954 ep->tx_ring[i-1].next = ep->tx_ring_dma;
/* hard_start_xmit: queue one skb on the Tx ring under ep->lock, choose
   whether this descriptor should raise a Tx-done interrupt based on ring
   occupancy (to limit interrupt rate), then kick the transmitter. */
958 static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
960 struct epic_private *ep = dev->priv;
961 int entry, free_count;
965 if (skb_padto(skb, ETH_ZLEN))
968 /* Caution: the write order is important here, set the field with the
969 "ownership" bit last. */
971 /* Calculate the next Tx descriptor entry. */
972 spin_lock_irqsave(&ep->lock, flags);
973 free_count = ep->cur_tx - ep->dirty_tx;
974 entry = ep->cur_tx % TX_RING_SIZE;
976 ep->tx_skbuff[entry] = skb;
977 ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
978 skb->len, PCI_DMA_TODEVICE);
/* Interrupt-mitigation policy: request a Tx-done interrupt only at the
   half-full watermark and when the ring is nearly exhausted. */
979 if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
980 ctrl_word = cpu_to_le32(0x100000); /* No interrupt */
981 } else if (free_count == TX_QUEUE_LEN/2) {
982 ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
983 } else if (free_count < TX_QUEUE_LEN - 1) {
984 ctrl_word = cpu_to_le32(0x100000); /* No Tx-done intr. */
986 /* Leave room for an additional entry. */
987 ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
990 ep->tx_ring[entry].buflength = ctrl_word | cpu_to_le32(skb->len);
991 ep->tx_ring[entry].txstatus =
992 ((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
993 | cpu_to_le32(DescOwn);
997 netif_stop_queue(dev);
999 spin_unlock_irqrestore(&ep->lock, flags);
1000 /* Trigger an immediate transmit demand. */
1001 outl(TxQueued, dev->base_addr + COMMAND);
1003 dev->trans_start = jiffies;
1005 printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
1006 "flag %2.2x Tx status %8.8x.\n",
1007 dev->name, (int)skb->len, entry, ctrl_word,
1008 (int)inl(dev->base_addr + TxSTAT));
/* Classify a failed Tx descriptor's status bits into the matching
   net_device_stats error counters. */
1013 static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
1016 struct net_device_stats *stats = &ep->stats;
1018 #ifndef final_version
1019 /* There was a major error, log it. */
1021 printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
1025 if (status & 0x1050)
1026 stats->tx_aborted_errors++;
1027 if (status & 0x0008)
1028 stats->tx_carrier_errors++;
1029 if (status & 0x0040)
1030 stats->tx_window_errors++;
1031 if (status & 0x0010)
1032 stats->tx_fifo_errors++;
/* Reap completed Tx descriptors: account stats (or errors), unmap and free
   each skb, advance dirty_tx, and wake the queue once enough ring space is
   free again. */
1035 static void epic_tx(struct net_device *dev, struct epic_private *ep)
1037 unsigned int dirty_tx, cur_tx;
1040 * Note: if this lock becomes a problem we can narrow the locked
1041 * region at the cost of occasionally grabbing the lock more times.
1043 cur_tx = ep->cur_tx;
1044 for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) {
1045 struct sk_buff *skb;
1046 int entry = dirty_tx % TX_RING_SIZE;
1047 int txstatus = le32_to_cpu(ep->tx_ring[entry].txstatus);
1049 if (txstatus & DescOwn)
1050 break; /* It still hasn't been Txed */
/* Bit 0 set == transmitted OK; otherwise record the error breakdown. */
1052 if (likely(txstatus & 0x0001)) {
1053 ep->stats.collisions += (txstatus >> 8) & 15;
1054 ep->stats.tx_packets++;
1055 ep->stats.tx_bytes += ep->tx_skbuff[entry]->len;
1057 epic_tx_error(dev, ep, txstatus);
1059 /* Free the original skb. */
1060 skb = ep->tx_skbuff[entry];
1061 pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
1062 skb->len, PCI_DMA_TODEVICE);
1063 dev_kfree_skb_irq(skb);
1064 ep->tx_skbuff[entry] = NULL;
1067 #ifndef final_version
1068 if (cur_tx - dirty_tx > TX_RING_SIZE) {
1070 "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
1071 dev->name, dirty_tx, cur_tx, ep->tx_full);
1072 dirty_tx += TX_RING_SIZE;
1075 ep->dirty_tx = dirty_tx;
1076 if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
1077 /* The ring is no longer full, allow new TX entries. */
1079 netif_wake_queue(dev);
1083 /* The interrupt handler does all of the Rx thread work and cleans up
1084 after the Tx thread. */
1085 static irqreturn_t epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1087 struct net_device *dev = dev_instance;
1088 struct epic_private *ep = dev->priv;
1089 long ioaddr = dev->base_addr;
1090 unsigned int handled = 0;
1093 status = inl(ioaddr + INTSTAT);
1094 /* Acknowledge all of the current interrupt sources ASAP. */
1095 outl(status & EpicNormalEvent, ioaddr + INTSTAT);
1098 printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
1099 "intstat=%#8.8x.\n", dev->name, status,
1100 (int)inl(ioaddr + INTSTAT));
1103 if ((status & IntrSummary) == 0)
/* NAPI hand-off: mask the NAPI event sources and schedule the poll
   routine; reschedule_in_poll covers the race with an in-flight poll. */
1108 if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) {
1109 spin_lock(&ep->napi_lock);
1110 if (netif_rx_schedule_prep(dev)) {
1111 epic_napi_irq_off(dev, ep);
1112 __netif_rx_schedule(dev);
1114 ep->reschedule_in_poll++;
1115 spin_unlock(&ep->napi_lock);
1117 status &= ~EpicNapiEvent;
1119 /* Check uncommon events all at once. */
1120 if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
/* All-ones status means the chip vanished (CardBus removal). */
1121 if (status == EpicRemoved)
1124 /* Always update the error counts to avoid overhead later. */
1125 ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
1126 ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
1127 ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
1129 if (status & TxUnderrun) { /* Tx FIFO underflow. */
1130 ep->stats.tx_fifo_errors++;
/* Raise the Tx threshold to avoid repeated underruns, then restart. */
1131 outl(ep->tx_threshold += 128, ioaddr + TxThresh);
1132 /* Restart the transmit process. */
1133 outl(RestartTx, ioaddr + COMMAND);
1135 if (status & PCIBusErr170) {
1136 printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n",
1141 /* Clear all error sources. */
1142 outl(status & 0x7f18, ioaddr + INTSTAT);
1147 printk(KERN_DEBUG "%s: exit interrupt, intr_status=%#4.4x.\n",
1151 return IRQ_RETVAL(handled);
/*
 * Receive up to 'budget' packets from the Rx descriptor ring and feed
 * them to the stack, then refill the ring with fresh buffers.
 * Small frames (< rx_copybreak) are copied into a freshly allocated
 * skb so the large ring buffer can be reused; larger frames are
 * unmapped and handed up directly.  Returns the number of packets
 * processed (via work_done accounting in the caller, epic_poll).
 *
 * NOTE(review): this excerpt has lines elided by extraction (braces,
 * 'break' on budget exhaustion, the error-path/good-path else arms).
 * Only comments were added; visible code is unchanged.
 */
static int epic_rx(struct net_device *dev, int budget)
	struct epic_private *ep = dev->priv;
	int entry = ep->cur_rx % RX_RING_SIZE;
	/* Number of ring slots we could possibly own, capped by budget below. */
	int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;

	printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry,
	       ep->rx_ring[entry].rxstatus);

	if (rx_work_limit > budget)
		rx_work_limit = budget;

	/* If we own the next entry, it's a new packet. Send it up. */
	while ((ep->rx_ring[entry].rxstatus & cpu_to_le32(DescOwn)) == 0) {
		int status = le32_to_cpu(ep->rx_ring[entry].rxstatus);

		printk(KERN_DEBUG "  epic_rx() status was %8.8x.\n", status);
		if (--rx_work_limit < 0)
		/* 0x2006: error bits -- 0x2000 frame spans buffers,
		   0x0006 CRC/alignment (counted by hardware). */
		if (status & 0x2006) {
			printk(KERN_DEBUG "%s: epic_rx() error status was %8.8x.\n",
			if (status & 0x2000) {
				printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
				       "multiple buffers, status %4.4x!\n", dev->name, status);
				ep->stats.rx_length_errors++;
			} else if (status & 0x0006)
				/* Rx Frame errors are counted in hardware. */
				ep->stats.rx_errors++;
			/* Malloc up new buffer, compatible with net-2e. */
			/* Omit the four octet CRC from the length. */
			short pkt_len = (status >> 16) - 4;
			struct sk_buff *skb;

			if (pkt_len > PKT_BUF_SZ - 4) {
				printk(KERN_ERR "%s: Oversized Ethernet frame, status %x "
				       dev->name, status, pkt_len);
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
			    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				/* Copy-path: sync the DMA buffer for CPU access,
				   copy out, then give it back to the device. */
				pci_dma_sync_single_for_cpu(ep->pci_dev,
							    ep->rx_ring[entry].bufaddr,
							    PCI_DMA_FROMDEVICE);
				eth_copy_and_sum(skb, ep->rx_skbuff[entry]->data, pkt_len, 0);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(ep->pci_dev,
							       ep->rx_ring[entry].bufaddr,
							       PCI_DMA_FROMDEVICE);
				/* Direct-path: unmap and hand the ring skb up;
				   the slot is refilled in the loop below. */
				pci_unmap_single(ep->pci_dev,
						 ep->rx_ring[entry].bufaddr,
						 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
				skb_put(skb = ep->rx_skbuff[entry], pkt_len);
				ep->rx_skbuff[entry] = NULL;
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
			dev->last_rx = jiffies;
			ep->stats.rx_packets++;
			ep->stats.rx_bytes += pkt_len;
		entry = (++ep->cur_rx) % RX_RING_SIZE;

	/* Refill the Rx ring buffers. */
	for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
		entry = ep->dirty_rx % RX_RING_SIZE;
		if (ep->rx_skbuff[entry] == NULL) {
			struct sk_buff *skb;
			skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz);
			skb->dev = dev;	/* Mark as being used by this device. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
					skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
		/* Hand the descriptor back to the chip last. */
		ep->rx_ring[entry].rxstatus = cpu_to_le32(DescOwn);
1251 static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
1253 long ioaddr = dev->base_addr;
1256 status = inl(ioaddr + INTSTAT);
1258 if (status == EpicRemoved)
1260 if (status & RxOverflow) /* Missed a Rx frame. */
1261 ep->stats.rx_errors++;
1262 if (status & (RxOverflow | RxFull))
1263 outw(RxQueued, ioaddr + COMMAND);
/*
 * NAPI poll routine: receive up to the allowed budget of packets,
 * service Rx error conditions, and — once all work is done — leave
 * polling mode and unmask the chip's NAPI interrupt sources again.
 * Returns nonzero while more work remains (stay on the poll list).
 *
 * NOTE(review): this excerpt has lines elided by extraction (braces,
 * the 'more' declaration, and apparently a reschedule/goto path that
 * consumes reschedule_in_poll).  Only comments were added here.
 */
static int epic_poll(struct net_device *dev, int *budget)
	struct epic_private *ep = dev->priv;
	int work_done = 0, orig_budget;
	long ioaddr = dev->base_addr;

	/* Honor both the global budget and this device's quota. */
	orig_budget = (*budget > dev->quota) ? dev->quota : *budget;

	work_done += epic_rx(dev, *budget);

	epic_rx_err(dev, ep);

	*budget -= work_done;
	dev->quota -= work_done;

	/* Budget not exhausted: try to exit polling mode. */
	if (netif_running(dev) && (work_done < orig_budget)) {
		unsigned long flags;

		/* A bit baroque but it avoids a (space hungry) spin_unlock */

		spin_lock_irqsave(&ep->napi_lock, flags);

		/* reschedule_in_poll is the marker epic_interrupt() leaves
		   when it could not schedule us again. */
		more = ep->reschedule_in_poll;
		__netif_rx_complete(dev);
		/* Ack pending NAPI events, then unmask their IRQ sources. */
		outl(EpicNapiEvent, ioaddr + INTSTAT);
		epic_napi_irq_on(dev, ep);
		ep->reschedule_in_poll--;

		spin_unlock_irqrestore(&ep->napi_lock, flags);

	return (work_done >= orig_budget);
/*
 * Bring the interface down: stop the Tx queue, kill the link timer,
 * mask interrupts, release the IRQ, free every skb still held by the
 * Rx/Tx rings (unmapping their DMA buffers), and leave the chip in
 * low-power mode.
 *
 * NOTE(review): this excerpt has lines elided by extraction (braces,
 * 'int i;', the quiesce call before teardown, and the skb NULL checks
 * guarding the unmap/free in both ring loops).  Only comments added.
 */
static int epic_close(struct net_device *dev)
	long ioaddr = dev->base_addr;
	struct epic_private *ep = dev->priv;
	struct sk_buff *skb;

	netif_stop_queue(dev);

	printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
	       dev->name, (int)inl(ioaddr + INTSTAT));

	del_timer_sync(&ep->timer);

	epic_disable_int(dev, ep);

	free_irq(dev->irq, dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = ep->rx_skbuff[i];
		ep->rx_skbuff[i] = NULL;
		ep->rx_ring[i].rxstatus = 0;		/* Not owned by Epic chip. */
		ep->rx_ring[i].buflength = 0;
		pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr,
				 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
		/* Poison the address so a stray DMA access is obvious. */
		ep->rx_ring[i].bufaddr = 0xBADF00D0;	/* An invalid address. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = ep->tx_skbuff[i];
		ep->tx_skbuff[i] = NULL;
		pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr,
				 skb->len, PCI_DMA_TODEVICE);

	/* Green! Leave the chip in low-power mode. */
	outl(0x0008, ioaddr + GENCTL);
1360 static struct net_device_stats *epic_get_stats(struct net_device *dev)
1362 struct epic_private *ep = dev->priv;
1363 long ioaddr = dev->base_addr;
1365 if (netif_running(dev)) {
1366 /* Update the error counts. */
1367 ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
1368 ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
1369 ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
1375 /* Set or clear the multicast filter for this adaptor.
1376 Note that we only use exclusion around actually queueing the
1377 new frame, not around filling ep->setup_frame. This is non-deterministic
1378 when re-entered but still correct. */
/*
 * Program the receiver filter to match the device flags: promiscuous,
 * all-multicast (also used for any multicast list, due to a noted chip
 * bug), unicast-only, or a perfect-hash multicast filter (dead code).
 *
 * NOTE(review): this excerpt has lines elided by extraction (braces
 * and an early return in the mc_count == 0 arm).  Only comments added.
 */
static void set_rx_mode(struct net_device *dev)
	long ioaddr = dev->base_addr;
	struct epic_private *ep = dev->priv;
	unsigned char mc_filter[8];		 /* Multicast hash filter */

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		outl(0x002C, ioaddr + RxCtrl);	/* presumably promiscuous RxCtrl mode -- confirm vs. 83c170 datasheet */
		/* Unconditionally log net taps. */
		printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
		memset(mc_filter, 0xff, sizeof(mc_filter));
	} else if ((dev->mc_count > 0) || (dev->flags & IFF_ALLMULTI)) {
		/* There is apparently a chip bug, so the multicast filter
		   is never enabled. */
		/* Too many to filter perfectly -- accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		outl(0x000C, ioaddr + RxCtrl);
	} else if (dev->mc_count == 0) {
		outl(0x0004, ioaddr + RxCtrl);
	} else {					/* Never executed, for now. */
		struct dev_mc_list *mclist;

		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
			 i++, mclist = mclist->next) {
			unsigned int bit_nr =
				ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
			/* NOTE(review): bit_nr can be up to 63, so (1 << bit_nr)
			   shifts an int by >31 bits (undefined behavior); this
			   looks like it should be (1 << (bit_nr & 7)).  Branch
			   is currently unreachable per the comment above. */
			mc_filter[bit_nr >> 3] |= (1 << bit_nr);
	/* ToDo: perhaps we need to stop the Tx and Rx process here? */
	/* Only rewrite the hash registers when the filter changed. */
	if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
		for (i = 0; i < 4; i++)
			outw(((u16 *)mc_filter)[i], ioaddr + MC0 + i*4);
		memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
1421 static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1423 struct epic_private *np = dev->priv;
1425 strcpy (info->driver, DRV_NAME);
1426 strcpy (info->version, DRV_VERSION);
1427 strcpy (info->bus_info, pci_name(np->pci_dev));
1430 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1432 struct epic_private *np = dev->priv;
1435 spin_lock_irq(&np->lock);
1436 rc = mii_ethtool_gset(&np->mii, cmd);
1437 spin_unlock_irq(&np->lock);
1442 static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1444 struct epic_private *np = dev->priv;
1447 spin_lock_irq(&np->lock);
1448 rc = mii_ethtool_sset(&np->mii, cmd);
1449 spin_unlock_irq(&np->lock);
1454 static int netdev_nway_reset(struct net_device *dev)
1456 struct epic_private *np = dev->priv;
1457 return mii_nway_restart(&np->mii);
1460 static u32 netdev_get_link(struct net_device *dev)
1462 struct epic_private *np = dev->priv;
1463 return mii_link_ok(&np->mii);
1466 static u32 netdev_get_msglevel(struct net_device *dev)
1471 static void netdev_set_msglevel(struct net_device *dev, u32 value)
1476 static int ethtool_begin(struct net_device *dev)
1478 unsigned long ioaddr = dev->base_addr;
1479 /* power-up, if interface is down */
1480 if (! netif_running(dev)) {
1481 outl(0x0200, ioaddr + GENCTL);
1482 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
1487 static void ethtool_complete(struct net_device *dev)
1489 unsigned long ioaddr = dev->base_addr;
1490 /* power-down, if interface is down */
1491 if (! netif_running(dev)) {
1492 outl(0x0008, ioaddr + GENCTL);
1493 outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
1497 static struct ethtool_ops netdev_ethtool_ops = {
1498 .get_drvinfo = netdev_get_drvinfo,
1499 .get_settings = netdev_get_settings,
1500 .set_settings = netdev_set_settings,
1501 .nway_reset = netdev_nway_reset,
1502 .get_link = netdev_get_link,
1503 .get_msglevel = netdev_get_msglevel,
1504 .set_msglevel = netdev_set_msglevel,
1505 .get_sg = ethtool_op_get_sg,
1506 .get_tx_csum = ethtool_op_get_tx_csum,
1507 .begin = ethtool_begin,
1508 .complete = ethtool_complete
/*
 * MII ioctl handler (the SIOC[GS]MIIxxx ioctls, dispatched through
 * generic_mii_ioctl).  If the interface is down, the chip is powered
 * up around the MII access and powered back down afterwards -- the
 * same sequence as ethtool_begin/ethtool_complete.
 *
 * NOTE(review): this excerpt has lines elided by extraction (braces,
 * the 'rc' declaration and final 'return rc;').  Only comments added.
 */
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
	struct epic_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	struct mii_ioctl_data *data = if_mii(rq);

	/* power-up, if interface is down */
	if (! netif_running(dev)) {
		outl(0x0200, ioaddr + GENCTL);
		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);

	/* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */
	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii, data, cmd, NULL);
	spin_unlock_irq(&np->lock);

	/* power-down, if interface is down */
	if (! netif_running(dev)) {
		outl(0x0008, ioaddr + GENCTL);
		outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
1538 static void __devexit epic_remove_one (struct pci_dev *pdev)
1540 struct net_device *dev = pci_get_drvdata(pdev);
1541 struct epic_private *ep = dev->priv;
1543 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
1544 pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
1545 unregister_netdev(dev);
1547 iounmap((void*) dev->base_addr);
1549 pci_release_regions(pdev);
1551 pci_disable_device(pdev);
1552 pci_set_drvdata(pdev, NULL);
1553 /* pci_power_off(pdev, -1); */
1559 static int epic_suspend (struct pci_dev *pdev, pm_message_t state)
1561 struct net_device *dev = pci_get_drvdata(pdev);
1562 long ioaddr = dev->base_addr;
1564 if (!netif_running(dev))
1567 /* Put the chip into low-power mode. */
1568 outl(0x0008, ioaddr + GENCTL);
1569 /* pci_power_off(pdev, -1); */
/*
 * PM resume hook: restart the chip if the interface was up when the
 * system suspended; a down interface needs nothing.
 *
 * Review: restored the 'return 0;' statements, braces, and the
 * restart call (epic_restart) dropped from this excerpt -- TODO
 * confirm against the driver's resume path.
 */
static int epic_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (!netif_running(dev))
		return 0;
	epic_restart(dev);
	/* pci_power_on(pdev); */
	return 0;
}
1585 #endif /* CONFIG_PM */
1588 static struct pci_driver epic_driver = {
1590 .id_table = epic_pci_tbl,
1591 .probe = epic_init_one,
1592 .remove = __devexit_p(epic_remove_one),
1594 .suspend = epic_suspend,
1595 .resume = epic_resume,
1596 #endif /* CONFIG_PM */
1600 static int __init epic_init (void)
1602 /* when a module, this is printed whether or not devices are found in probe */
1604 printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
1605 version, version2, version3);
1608 return pci_module_init (&epic_driver);
1612 static void __exit epic_cleanup (void)
1614 pci_unregister_driver (&epic_driver);
/* Module entry/exit points: wire init and cleanup into the kernel's
   module loader. */
module_init(epic_init);
module_exit(epic_cleanup);