2 * Davicom DM9000 Fast Ethernet driver for Linux.
3 * Copyright (C) 1997 Sten Wang
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * (C) Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
17 * Additional updates, Copyright:
18 * Ben Dooks <ben@simtec.co.uk>
19 * Sascha Hauer <s.hauer@pengutronix.de>
22 #include <linux/module.h>
23 #include <linux/ioport.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/init.h>
27 #include <linux/skbuff.h>
28 #include <linux/spinlock.h>
29 #include <linux/crc32.h>
30 #include <linux/mii.h>
31 #include <linux/ethtool.h>
32 #include <linux/dm9000.h>
33 #include <linux/delay.h>
34 #include <linux/platform_device.h>
35 #include <linux/irq.h>
37 #include <asm/delay.h>
40 #include <mach/gpio.h>
41 #include <mach/iomux.h>
45 /* Board/System/Debug information/definition ---------------- */
47 #define DM9000_PHY 0x40 /* PHY address 0x01 */
49 #define CARDNAME "dm9000"
50 #define DRV_VERSION "1.31"
/* Transmit watchdog timeout; value is in milliseconds (5000 == 5s),
 * read-only after module load (perm 0400). */
53 * Transmit timeout, default 5 seconds.
55 static int watchdog = 5000;
56 module_param(watchdog, int, 0400);
57 MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
59 /* DM9000 register address locking.
61 * The DM9000 uses an address register to control where data written
62 * to the data register goes. This means that the address register
63 * must be preserved over interrupts or similar calls.
65 * During interrupt and other critical calls, a spinlock is used to
66 * protect the system, but the calls themselves save the address
67 * in the address register in case they are interrupting another
68 * access to the device.
70 * For general accesses a lock is provided so that calls which are
71 * allowed to sleep are serialised so that the address register does
72 * not need to be saved. This lock also serves to serialise access
73 * to the EEPROM and PHY access registers which are shared between
77 /* The driver supports the original DM9000E, and now the two newer
78 * devices, DM9000A and DM9000B.
82 TYPE_DM9000E, /* original DM9000 */
87 /* Structure/enum declaration ------------------------------- */
/* Per-device driver state, stored in the netdev private area.
 * NOTE(review): several fields referenced elsewhere in this file
 * (lock, flags, tx_pkt_cnt, rx_csum, msg_enable, imr_all, dev_id,
 * debug_level, queue_*) are missing from this extraction of the
 * struct — confirm against the full source before relying on layout. */
88 typedef struct board_info {
90 void __iomem *io_addr; /* Register I/O base address */
91 void __iomem *io_data; /* Data I/O address */
99 u8 io_mode; /* 0:word, 2:byte */
104 unsigned int in_suspend :1;
107 enum dm9000_type type;
109 #ifdef CONFIG_DM9000_USE_NAND_CONTROL
/* Deferred-IRQ machinery: the bus is shared with the NAND controller,
 * so interrupt handling is pushed to a workqueue. */
111 struct work_struct dm9k_work;
112 struct workqueue_struct *dm9000_wq;
/* Width-specific I/O accessors chosen by dm9000_set_io(). */
115 void (*inblk)(void __iomem *port, void *data, int length);
116 void (*outblk)(void __iomem *port, void *data, int length);
117 void (*dumpblk)(void __iomem *port, int length);
119 struct device *dev; /* parent device */
121 struct resource *addr_res; /* resources found */
122 struct resource *data_res;
123 struct resource *addr_req; /* resources requested */
124 struct resource *data_req;
125 struct resource *irq_res;
127 struct mutex addr_lock; /* phy and eeprom access lock */
129 struct delayed_work phy_poll;
130 struct net_device *ndev;
134 struct mii_if_info mii;
/* Debug print gated on both the compile-time and per-device debug
 * levels; expands to dev_dbg() on the parent device. */
143 #define dm9000_dbg(db, lev, msg...) do { \
144 if ((lev) < CONFIG_DM9000_DEBUGLEVEL && \
145 (lev) < db->debug_level) { \
146 dev_dbg(db->dev, msg); \
/* The DM9000 shares its bus with the RK2818 NAND controller; when both
 * are enabled, real mutex primitives are provided by the NAND driver.
 * Otherwise the no-op stubs below apply (trylock always succeeds).
 * NOTE(review): the #else/#endif lines appear to be missing from this
 * extraction — confirm against the full source. */
150 #if defined(CONFIG_DM9000_USE_NAND_CONTROL) && defined(CONFIG_MTD_NAND_RK2818)
151 extern void rk2818_nand_status_mutex_lock(void);
152 extern int rk2818_nand_status_mutex_trylock(void);
153 extern void rk2818_nand_status_mutex_unlock(void);
155 static void rk2818_nand_status_mutex_lock(void){return;}
156 static int rk2818_nand_status_mutex_trylock(void) {return 1;}
157 static void rk2818_nand_status_mutex_unlock(void) {return;}
160 static inline board_info_t *to_dm9000_board(struct net_device *dev)
162 return netdev_priv(dev);
165 /* DM9000 network board routine ---------------------------- */
/* Soft-reset the chip: select NCR via the address port, then write the
 * reset bit through the data port.
 * NOTE(review): the settle delays between writes are missing from this
 * extraction — confirm against the full source. */
168 dm9000_reset(board_info_t * db)
170 dev_dbg(db->dev, "resetting device\n");
173 writeb(DM9000_NCR, db->io_addr);
175 writeb(NCR_RST, db->io_data);
180 * Read a byte from I/O port
/* Select register @reg through the address port, then read its value
 * back through the data port. Caller must hold db->lock or otherwise
 * guarantee the address register is not clobbered concurrently. */
183 ior(board_info_t * db, int reg)
185 writeb(reg, db->io_addr);
186 return readb(db->io_data);
190 * Write a byte to I/O port
/* Select register @reg through the address port, then write @value
 * through the data port. Same locking requirement as ior(). */
194 iow(board_info_t * db, int reg, int value)
196 writeb(reg, db->io_addr);
197 writeb(value, db->io_data);
200 /* routines for sending block to chip */
202 static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count)
204 writesb(reg, data, count);
207 static void dm9000_outblk_16bit(void __iomem *reg, void *data, int count)
209 writesw(reg, data, (count+1) >> 1);
212 static void dm9000_outblk_32bit(void __iomem *reg, void *data, int count)
214 writesl(reg, data, (count+3) >> 2);
217 /* input block from chip to memory */
219 static void dm9000_inblk_8bit(void __iomem *reg, void *data, int count)
221 readsb(reg, data, count);
225 static void dm9000_inblk_16bit(void __iomem *reg, void *data, int count)
227 readsw(reg, data, (count+1) >> 1);
230 static void dm9000_inblk_32bit(void __iomem *reg, void *data, int count)
232 readsl(reg, data, (count+3) >> 2);
235 /* dump block from chip to null */
/* Discard @count bytes from the chip FIFO by reading and dropping them.
 * NOTE(review): the loop body (the discarding read) is missing from
 * this extraction — confirm against the full source. */
237 static void dm9000_dumpblk_8bit(void __iomem *reg, int count)
242 for (i = 0; i < count; i++)
/* Discard @count bytes from the FIFO as 16-bit words (rounded up).
 * NOTE(review): loop body missing from this extraction — confirm. */
246 static void dm9000_dumpblk_16bit(void __iomem *reg, int count)
251 count = (count + 1) >> 1;
253 for (i = 0; i < count; i++)
/* Discard @count bytes from the FIFO as 32-bit words (rounded up).
 * NOTE(review): loop body missing from this extraction — confirm. */
257 static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
262 count = (count + 3) >> 2;
264 for (i = 0; i < count; i++)
270 * select the specified set of io routines to use with the
/* Install the inblk/outblk/dumpblk accessors matching @byte_width
 * (1 = 8-bit, 2 = 16-bit, 4 = 32-bit). A 3-byte bus falls back to
 * 16-bit mode with a warning.
 * NOTE(review): the case labels of this switch are missing from this
 * extraction — the width-to-case mapping above is inferred, confirm. */
274 static void dm9000_set_io(struct board_info *db, int byte_width)
276 /* use the size of the data resource to work out what IO
277 * routines we want to use
280 switch (byte_width) {
282 db->dumpblk = dm9000_dumpblk_8bit;
283 db->outblk = dm9000_outblk_8bit;
284 db->inblk = dm9000_inblk_8bit;
289 dev_dbg(db->dev, ": 3 byte IO, falling back to 16bit\n");
291 db->dumpblk = dm9000_dumpblk_16bit;
292 db->outblk = dm9000_outblk_16bit;
293 db->inblk = dm9000_inblk_16bit;
298 db->dumpblk = dm9000_dumpblk_32bit;
299 db->outblk = dm9000_outblk_32bit;
300 db->inblk = dm9000_inblk_32bit;
305 static void dm9000_schedule_poll(board_info_t *db)
307 if (db->type == TYPE_DM9000E)
308 schedule_delayed_work(&db->phy_poll, HZ * 2);
/* ndo_do_ioctl: delegate MII ioctls to the generic helper.
 * NOTE(review): the error return for the !netif_running() case is
 * missing from this extraction — confirm against the full source. */
311 static int dm9000_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
313 board_info_t *dm = to_dm9000_board(dev);
315 if (!netif_running(dev))
318 return generic_mii_ioctl(&dm->mii, if_mii(req), cmd, NULL);
/* Read a register with the NAND-bus mutex and db->lock held, so the
 * address register cannot be clobbered by a concurrent access.
 * NOTE(review): the actual ior() call and return are missing from this
 * extraction — confirm against the full source. */
322 dm9000_read_locked(board_info_t *db, int reg)
327 rk2818_nand_status_mutex_lock();
329 spin_lock_irqsave(&db->lock, flags);
331 spin_unlock_irqrestore(&db->lock, flags);
333 rk2818_nand_status_mutex_unlock();
/* Wait for an EEPROM operation to finish: poll EPCR.ERRE, but since
 * that bit is unreliable on some chips, also bound the wait at ~8ms
 * and return normally on expiry (timeout is not treated as an error). */
338 static int dm9000_wait_eeprom(board_info_t *db)
341 int timeout = 8; /* wait max 8msec */
343 /* The DM9000 data sheets say we should be able to
344 * poll the ERRE bit in EPCR to wait for the EEPROM
345 * operation. From testing several chips, this bit
346 * does not seem to work.
348 * We attempt to use the bit, but fall back to the
349 * timeout (which is why we do not return an error
350 * on expiry) to say that the EEPROM operation has
355 status = dm9000_read_locked(db, DM9000_EPCR);
357 if ((status & EPCR_ERRE) == 0)
363 dev_dbg(db->dev, "timeout waiting EEPROM\n");
372 * Read a word data from EEPROM
/* Read one 16-bit EEPROM word at @offset into to[0] (low) / to[1]
 * (high). Serialised by addr_lock; each register burst additionally
 * takes the NAND-bus mutex and db->lock. No-op-style early exit when
 * the platform declares no EEPROM. */
375 dm9000_read_eeprom(board_info_t *db, int offset, u8 *to)
379 if (db->flags & DM9000_PLATF_NO_EEPROM) {
385 mutex_lock(&db->addr_lock);
387 rk2818_nand_status_mutex_lock();
389 spin_lock_irqsave(&db->lock, flags);
391 iow(db, DM9000_EPAR, offset);
392 iow(db, DM9000_EPCR, EPCR_ERPRR);
394 spin_unlock_irqrestore(&db->lock, flags);
396 rk2818_nand_status_mutex_unlock();
398 dm9000_wait_eeprom(db);
400 /* delay for at-least 150uS */
403 rk2818_nand_status_mutex_lock();
405 spin_lock_irqsave(&db->lock, flags);
407 iow(db, DM9000_EPCR, 0x0);
409 to[0] = ior(db, DM9000_EPDRL);
410 to[1] = ior(db, DM9000_EPDRH);
412 spin_unlock_irqrestore(&db->lock, flags);
414 rk2818_nand_status_mutex_unlock();
416 mutex_unlock(&db->addr_lock);
420 * Write a word data to SROM
/* Write one 16-bit word (data[0] low, data[1] high) to the EEPROM at
 * @offset. Same locking scheme as dm9000_read_eeprom(); silently does
 * nothing when the platform declares no EEPROM. */
423 dm9000_write_eeprom(board_info_t *db, int offset, u8 *data)
427 if (db->flags & DM9000_PLATF_NO_EEPROM)
430 mutex_lock(&db->addr_lock);
432 rk2818_nand_status_mutex_lock();
434 spin_lock_irqsave(&db->lock, flags);
435 iow(db, DM9000_EPAR, offset);
436 iow(db, DM9000_EPDRH, data[1]);
437 iow(db, DM9000_EPDRL, data[0]);
438 iow(db, DM9000_EPCR, EPCR_WEP | EPCR_ERPRW);
439 spin_unlock_irqrestore(&db->lock, flags);
441 rk2818_nand_status_mutex_unlock();
443 dm9000_wait_eeprom(db);
445 mdelay(1); /* wait at least 150uS to clear */
447 rk2818_nand_status_mutex_lock();
449 spin_lock_irqsave(&db->lock, flags);
450 iow(db, DM9000_EPCR, 0);
451 spin_unlock_irqrestore(&db->lock, flags);
453 rk2818_nand_status_mutex_unlock();
455 mutex_unlock(&db->addr_lock);
460 static void dm9000_get_drvinfo(struct net_device *dev,
461 struct ethtool_drvinfo *info)
463 board_info_t *dm = to_dm9000_board(dev);
465 strcpy(info->driver, CARDNAME);
466 strcpy(info->version, DRV_VERSION);
467 strcpy(info->bus_info, to_platform_device(dm->dev)->name);
470 static u32 dm9000_get_msglevel(struct net_device *dev)
472 board_info_t *dm = to_dm9000_board(dev);
474 return dm->msg_enable;
477 static void dm9000_set_msglevel(struct net_device *dev, u32 value)
479 board_info_t *dm = to_dm9000_board(dev);
481 dm->msg_enable = value;
/* ethtool: fetch link settings via the generic MII helper.
 * NOTE(review): the success return is missing from this extraction —
 * confirm against the full source. */
484 static int dm9000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
486 board_info_t *dm = to_dm9000_board(dev);
488 mii_ethtool_gset(&dm->mii, cmd);
492 static int dm9000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
494 board_info_t *dm = to_dm9000_board(dev);
496 return mii_ethtool_sset(&dm->mii, cmd);
499 static int dm9000_nway_reset(struct net_device *dev)
501 board_info_t *dm = to_dm9000_board(dev);
502 return mii_nway_restart(&dm->mii);
/* ethtool: report whether hardware RX checksumming is enabled.
 * NOTE(review): the return statement is missing from this extraction —
 * confirm against the full source. */
505 static uint32_t dm9000_get_rx_csum(struct net_device *dev)
507 board_info_t *dm = to_dm9000_board(dev);
/* ethtool: enable/disable hardware RX checksum offload by programming
 * RCSR under the NAND-bus mutex and db->lock.
 * NOTE(review): the assignment of @data into dm->rx_csum and the
 * return are missing from this extraction — confirm. */
511 static int dm9000_set_rx_csum(struct net_device *dev, uint32_t data)
513 board_info_t *dm = to_dm9000_board(dev);
519 rk2818_nand_status_mutex_lock();
521 spin_lock_irqsave(&dm->lock, flags);
522 iow(dm, DM9000_RCSR, dm->rx_csum ? RCSR_CSUM : 0);
523 spin_unlock_irqrestore(&dm->lock, flags);
524 rk2818_nand_status_mutex_unlock();
/* ethtool: enable/disable TX checksum offload via the generic helper;
 * defaults to -EOPNOTSUPP.
 * NOTE(review): the condition guarding the helper call and the return
 * are missing from this extraction — confirm. */
532 static int dm9000_set_tx_csum(struct net_device *dev, uint32_t data)
534 board_info_t *dm = to_dm9000_board(dev);
535 int ret = -EOPNOTSUPP;
538 ret = ethtool_op_set_tx_csum(dev, data);
/* ethtool: report link state — via MII for an external PHY, otherwise
 * from the NSR link-status bit.
 * NOTE(review): the ret declaration/return are missing from this
 * extraction — confirm against the full source. */
542 static u32 dm9000_get_link(struct net_device *dev)
544 board_info_t *dm = to_dm9000_board(dev);
547 if (dm->flags & DM9000_PLATF_EXT_PHY)
548 ret = mii_link_ok(&dm->mii);
550 ret = dm9000_read_locked(dm, DM9000_NSR) & NSR_LINKST ? 1 : 0;
555 #define DM_EEPROM_MAGIC (0x444D394B)
557 static int dm9000_get_eeprom_len(struct net_device *dev)
/* ethtool: dump EEPROM contents. Rejects odd offsets/lengths (the
 * device reads whole 16-bit words) and platforms without an EEPROM. */
562 static int dm9000_get_eeprom(struct net_device *dev,
563 struct ethtool_eeprom *ee, u8 *data)
565 board_info_t *dm = to_dm9000_board(dev);
566 int offset = ee->offset;
570 /* EEPROM access is aligned to two bytes */
572 if ((len & 1) != 0 || (offset & 1) != 0)
575 if (dm->flags & DM9000_PLATF_NO_EEPROM)
578 ee->magic = DM_EEPROM_MAGIC;
580 for (i = 0; i < len; i += 2)
581 dm9000_read_eeprom(dm, (offset + i) / 2, data + i)
/* ethtool: write EEPROM contents. Same alignment/no-EEPROM checks as
 * the read path, plus a magic-number check against DM_EEPROM_MAGIC. */
586 static int dm9000_set_eeprom(struct net_device *dev,
587 struct ethtool_eeprom *ee, u8 *data)
589 board_info_t *dm = to_dm9000_board(dev);
590 int offset = ee->offset;
594 /* EEPROM access is aligned to two bytes */
596 if ((len & 1) != 0 || (offset & 1) != 0)
599 if (dm->flags & DM9000_PLATF_NO_EEPROM)
602 if (ee->magic != DM_EEPROM_MAGIC)
605 for (i = 0; i < len; i += 2)
606 dm9000_write_eeprom(dm, (offset + i) / 2, data + i);
/* ethtool operations table wiring the handlers above. */
611 static const struct ethtool_ops dm9000_ethtool_ops = {
612 .get_drvinfo = dm9000_get_drvinfo,
613 .get_settings = dm9000_get_settings,
614 .set_settings = dm9000_set_settings,
615 .get_msglevel = dm9000_get_msglevel,
616 .set_msglevel = dm9000_set_msglevel,
617 .nway_reset = dm9000_nway_reset,
618 .get_link = dm9000_get_link,
619 .get_eeprom_len = dm9000_get_eeprom_len,
620 .get_eeprom = dm9000_get_eeprom,
621 .set_eeprom = dm9000_set_eeprom,
622 .get_rx_csum = dm9000_get_rx_csum,
623 .set_rx_csum = dm9000_set_rx_csum,
624 .get_tx_csum = ethtool_op_get_tx_csum,
625 .set_tx_csum = dm9000_set_tx_csum,
/* Log a link up/down transition; speed from NSR, duplex from NCR.
 * NOTE(review): the branch selecting between the up/down messages is
 * missing from this extraction — confirm against the full source. */
628 static void dm9000_show_carrier(board_info_t *db,
629 unsigned carrier, unsigned nsr)
631 struct net_device *ndev = db->ndev;
632 unsigned ncr = dm9000_read_locked(db, DM9000_NCR);
635 dev_info(db->dev, "%s: link up, %dMbps, %s-duplex, no LPA\n",
636 ndev->name, (nsr & NSR_SPEED) ? 10 : 100,
637 (ncr & NCR_FDX) ? "full" : "half");
639 dev_info(db->dev, "%s: link down\n", ndev->name);
/* Delayed-work handler that polls link state: simple internal PHYs are
 * checked via NSR directly, everything else via mii_check_media();
 * reschedules itself while the interface is running. */
643 dm9000_poll_work(struct work_struct *w)
645 struct delayed_work *dw = to_delayed_work(w);
646 board_info_t *db = container_of(dw, board_info_t, phy_poll);
647 struct net_device *ndev = db->ndev;
649 if (db->flags & DM9000_PLATF_SIMPLE_PHY &&
650 !(db->flags & DM9000_PLATF_EXT_PHY)) {
651 unsigned nsr = dm9000_read_locked(db, DM9000_NSR);
652 unsigned old_carrier = netif_carrier_ok(ndev) ? 1 : 0;
653 unsigned new_carrier;
655 new_carrier = (nsr & NSR_LINKST) ? 1 : 0;
657 if (old_carrier != new_carrier) {
658 if (netif_msg_link(db))
659 dm9000_show_carrier(db, new_carrier, nsr);
662 netif_carrier_off(ndev);
664 netif_carrier_on(ndev);
667 mii_check_media(&db->mii, netif_msg_link(db), 0);
669 if (netif_running(ndev))
670 dm9000_schedule_poll(db);
673 /* dm9000_release_board
675 * release a board, and any mapped resources
/* Undo probe-time resource setup: unmap both register windows and
 * release the requested memory regions. */
679 dm9000_release_board(struct platform_device *pdev, struct board_info *db)
681 /* unmap our resources */
683 iounmap(db->io_addr);
684 iounmap(db->io_data);
686 /* release the resources */
688 release_resource(db->data_req);
691 release_resource(db->addr_req);
/* Map a chip type to its suffix letter for log messages.
 * NOTE(review): the switch header and default case are missing from
 * this extraction — confirm against the full source. */
695 static unsigned char dm9000_type_to_char(enum dm9000_type type)
698 case TYPE_DM9000E: return 'e';
699 case TYPE_DM9000A: return 'a';
700 case TYPE_DM9000B: return 'b';
707 * Set DM9000 multicast address
/* Program the unicast address, the 64-bit multicast hash table (CRC of
 * each list entry selects a bit; bit 63 kept set for broadcast) and
 * the RX control register, all under the NAND-bus mutex and db->lock. */
710 dm9000_hash_table(struct net_device *dev)
712 board_info_t *db = netdev_priv(dev);
713 struct dev_mc_list *mcptr = dev->mc_list;
714 int mc_cnt = dev->mc_count;
718 u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
721 dm9000_dbg(db, 1, "entering %s\n", __func__);
723 rk2818_nand_status_mutex_lock();
725 spin_lock_irqsave(&db->lock, flags);
727 for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
728 iow(db, oft, dev->dev_addr[i]);
730 /* Clear Hash Table */
731 for (i = 0; i < 4; i++)
734 /* broadcast address */
735 hash_table[3] = 0x8000;
737 if (dev->flags & IFF_PROMISC)
740 if (dev->flags & IFF_ALLMULTI)
743 /* the multicast address in Hash Table : 64 bits */
744 for (i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
745 hash_val = ether_crc_le(6, mcptr->dmi_addr) & 0x3f;
746 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
749 /* Write the hash table to MAC MD table */
750 for (i = 0, oft = DM9000_MAR; i < 4; i++) {
751 iow(db, oft++, hash_table[i]);
752 iow(db, oft++, hash_table[i] >> 8);
755 iow(db, DM9000_RCR, rcr);
756 spin_unlock_irqrestore(&db->lock, flags);
758 rk2818_nand_status_mutex_unlock();
762 * Initialize dm9000 board
/* Bring the chip to its operational state: detect the bus I/O mode,
 * apply RX checksum setting, wake the internal PHY through GPIO0,
 * program the operating registers, install the address filter, and
 * unmask interrupts (link-change IRQ only on non-DM9000E parts). */
765 dm9000_init_dm9000(struct net_device *dev)
767 board_info_t *db = netdev_priv(dev);
770 dm9000_dbg(db, 1, "entering %s\n", __func__);
773 db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */
776 dm9000_set_rx_csum(dev, db->rx_csum);
778 /* GPIO0 on pre-activate PHY */
779 iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
780 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
781 iow(db, DM9000_GPR, 0); /* Enable PHY */
783 if (db->flags & DM9000_PLATF_EXT_PHY)
784 iow(db, DM9000_NCR, NCR_EXT_PHY);
786 /* Program operating register */
787 iow(db, DM9000_TCR, 0); /* TX Polling clear */
788 iow(db, DM9000_BPTR, 0x3f); /* Less 3Kb, 200us */
789 iow(db, DM9000_FCR, 0xff); /* Flow Control */
790 iow(db, DM9000_SMCR, 0); /* Special Mode */
791 /* clear TX status */
792 iow(db, DM9000_NSR, NSR_WAKEST | NSR_TX2END | NSR_TX1END);
793 iow(db, DM9000_ISR, ISR_CLR_STATUS); /* Clear interrupt status */
795 /* Set address filter table */
796 dm9000_hash_table(dev);
798 imr = IMR_PAR | IMR_PTM | IMR_PRM;
799 if (db->type != TYPE_DM9000E)
804 /* Enable TX/RX interrupt mask */
805 iow(db, DM9000_IMR, imr);
807 /* Init Driver variable */
809 db->queue_pkt_len = 0;
810 dev->trans_start = 0;
813 /* Our watchdog timed out. Called by the networking layer */
/* ndo_tx_timeout: stop the queue, fully re-initialise the chip, then
 * restart the queue. The address register is saved/restored so an
 * interrupted register access is not corrupted. */
814 static void dm9000_timeout(struct net_device *dev)
816 board_info_t *db = netdev_priv(dev);
820 /* Save previous register address */
821 reg_save = readb(db->io_addr);
823 rk2818_nand_status_mutex_lock();
825 spin_lock_irqsave(&db->lock, flags);
827 netif_stop_queue(dev);
829 dm9000_init_dm9000(dev);
830 /* We can accept TX packets again */
831 dev->trans_start = jiffies;
832 netif_wake_queue(dev);
834 /* Restore previous register address */
835 writeb(reg_save, db->io_addr);
836 spin_unlock_irqrestore(&db->lock, flags);
838 rk2818_nand_status_mutex_unlock();
/* Program TCCR for the packet's checksum mode (only when it changed),
 * load the TX length registers and issue the TX-request poll command.
 * Caller holds db->lock.
 * NOTE(review): the ip_summed/pkt_len parameter lines are missing from
 * this extraction — confirm the signature against the full source. */
841 static void dm9000_send_packet(struct net_device *dev,
845 board_info_t *dm = to_dm9000_board(dev);
847 /* The DM9000 is not smart enough to leave fragmented packets alone. */
848 if (dm->ip_summed != ip_summed) {
849 if (ip_summed == CHECKSUM_NONE)
850 iow(dm, DM9000_TCCR, 0);
852 iow(dm, DM9000_TCCR, TCCR_IP | TCCR_UDP | TCCR_TCP);
853 dm->ip_summed = ip_summed;
856 /* Set TX length to DM9000 */
857 iow(dm, DM9000_TXPLL, pkt_len);
858 iow(dm, DM9000_TXPLH, pkt_len >> 8);
860 /* Issue TX polling command */
861 iow(dm, DM9000_TCR, TCR_TXREQ); /* Cleared after TX complete */
865 * Hardware start transmission.
866 * Send a packet to media from the upper layer.
/* ndo_start_xmit: copy the skb into the chip's TX SRAM via MWCMD. The
 * chip double-buffers: the first packet is sent immediately, a second
 * is queued and the netif queue stopped until TX-done. Returns
 * NETDEV_TX_BUSY when both slots are full or the shared NAND bus
 * cannot be claimed without sleeping. */
869 dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
872 board_info_t *db = netdev_priv(dev);
874 dm9000_dbg(db, 3, "%s:\n", __func__);
876 if (db->tx_pkt_cnt > 1) {
877 dev_dbg(db->dev, "netdev tx busy\n");
878 return NETDEV_TX_BUSY;
881 if (!rk2818_nand_status_mutex_trylock()) {
882 dev_dbg(db->dev, "fun:%s, nand busy\n", __func__);
883 return NETDEV_TX_BUSY;
885 spin_lock_irqsave(&db->lock, flags);
887 /* Move data to DM9000 TX RAM */
888 writeb(DM9000_MWCMD, db->io_addr);
890 (db->outblk)(db->io_data, skb->data, skb->len);
891 dev->stats.tx_bytes += skb->len;
894 /* TX control: First packet immediately send, second packet queue */
895 if (db->tx_pkt_cnt == 1) {
896 dm9000_send_packet(dev, skb->ip_summed, skb->len);
899 db->queue_pkt_len = skb->len;
900 db->queue_ip_summed = skb->ip_summed;
901 netif_stop_queue(dev);
904 spin_unlock_irqrestore(&db->lock, flags);
906 rk2818_nand_status_mutex_unlock();
915 * DM9000 interrupt handler
916 * receive the packet to upper layer, free the transmitted packet
/* TX-complete handling (called from the interrupt path with db->lock
 * held): account the finished packet, launch the queued second packet
 * if any, and wake the netif queue. */
919 static void dm9000_tx_done(struct net_device *dev, board_info_t *db)
921 int tx_status = ior(db, DM9000_NSR); /* Got TX status */
923 if (tx_status & (NSR_TX2END | NSR_TX1END)) {
924 /* One packet sent complete */
926 dev->stats.tx_packets++;
928 if (netif_msg_tx_done(db))
929 dev_dbg(db->dev, "tx done, NSR %02x\n", tx_status);
931 /* Queue packet check & send */
932 if (db->tx_pkt_cnt > 0)
933 dm9000_send_packet(dev, db->queue_ip_summed,
935 netif_wake_queue(dev);
/* On-wire RX header read from the chip FIFO ahead of each frame.
 * NOTE(review): the field declarations (status/length) are missing
 * from this extraction — confirm against the full source. */
939 struct dm9000_rxhdr {
943 } __attribute__((__packed__));
946 * Received a packet and pass to upper layer
/* RX path (interrupt context, db->lock held by caller). Loops while
 * the chip flags a packet ready: a dummy MRCMDX read latches the
 * status byte, the rxhdr is pulled via MRCMD, the frame is either
 * copied into a fresh skb and passed up (with hardware checksum
 * verdict from the status byte) or dumped on error. A bad status byte
 * stops the receiver and masks interrupts entirely. */
949 dm9000_rx(struct net_device *dev)
951 board_info_t *db = netdev_priv(dev);
952 struct dm9000_rxhdr rxhdr;
958 /* Check packet ready or not */
960 ior(db, DM9000_MRCMDX); /* Dummy read */
962 udelay(1);//add by lyx@20100713,or dm9000_rx will be error in high frequence
965 /* Get most updated data */
966 rxbyte = ior(db, DM9000_MRCMDX); /* Dummy read */
968 rxbyte = readb(db->io_data);
971 /* Status check: this byte must be 0 or 1 */
972 if (rxbyte & DM9000_PKT_ERR) {
973 dev_warn(db->dev, "status check fail: %d\n", rxbyte);
975 iow(db, DM9000_RCR, 0x00); /* Stop Device */
976 iow(db, DM9000_IMR, IMR_PAR); /* Stop INT request */
983 if (!(rxbyte & DM9000_PKT_RDY)) {
984 //printk("packet not ready to receive\n");
988 /* A packet ready now & Get status/length */
990 writeb(DM9000_MRCMD, db->io_addr);
992 (db->inblk)(db->io_data, &rxhdr, sizeof(rxhdr));
994 RxLen = le16_to_cpu(rxhdr.RxLen);
996 if (netif_msg_rx_status(db))
997 dev_dbg(db->dev, "RX: status %02x, length %04x\n",
998 rxhdr.RxStatus, RxLen);
1000 /* Packet Status check */
1003 if (netif_msg_rx_err(db))
1004 dev_dbg(db->dev, "RX: Bad Packet (runt)\n");
1007 if (RxLen > DM9000_PKT_MAX) {
1008 dev_dbg(db->dev, "RST: RX Len:%x\n", RxLen)
1011 /* rxhdr.RxStatus is identical to RSR register. */
1012 if (rxhdr.RxStatus & (RSR_FOE | RSR_CE | RSR_AE |
1013 RSR_PLE | RSR_RWTO |
1014 RSR_LCS | RSR_RF)) {
1016 if (rxhdr.RxStatus & RSR_FOE) {
1017 if (netif_msg_rx_err(db))
1018 dev_dbg(db->dev, "fifo error\n");
1019 dev->stats.rx_fifo_errors++;
1021 if (rxhdr.RxStatus & RSR_CE) {
1022 if (netif_msg_rx_err(db))
1023 dev_dbg(db->dev, "crc error\n");
1024 dev->stats.rx_crc_errors++;
1026 if (rxhdr.RxStatus & RSR_RF) {
1027 if (netif_msg_rx_err(db))
1028 dev_dbg(db->dev, "length error\n");
1029 dev->stats.rx_length_errors++;
1033 /* Move data from DM9000 */
1035 && ((skb = dev_alloc_skb(RxLen + 4)) != NULL)) {
1036 skb_reserve(skb, 2);
1037 rdptr = (u8 *) skb_put(skb, RxLen - 4);
1039 /* Read received packet from RX SRAM */
1041 (db->inblk)(db->io_data, rdptr, RxLen);
1042 dev->stats.rx_bytes += RxLen;
1044 /* Pass to upper layer */
1045 skb->protocol = eth_type_trans(skb, dev);
1047 if ((((rxbyte & 0x1c) << 3) & rxbyte) == 0)
1048 skb->ip_summed = CHECKSUM_UNNECESSARY;
1050 skb->ip_summed = CHECKSUM_NONE;
1053 dev->stats.rx_packets++;
1056 /* need to dump the packet's data */
1058 (db->dumpblk)(db->io_data, RxLen);
1060 } while (rxbyte & DM9000_PKT_RDY);
1063 #ifdef CONFIG_DM9000_USE_NAND_CONTROL
/* Deferred interrupt body (NAND-shared-bus build): runs in workqueue
 * context with the device IRQ disabled. Masks chip interrupts, reads
 * and clears ISR, dispatches RX/TX/link-change handling, restores the
 * interrupt mask and address register, then re-enables the IRQ. */
1064 static void dm9000_interrupt_work(struct work_struct *work)
1066 board_info_t *db = container_of(work, board_info_t, dm9k_work);
1067 struct net_device *dev = db->dev_id;
1069 unsigned long flags;
1072 //printk("entering %s\n", __FUNCTION__);
1074 /* A real interrupt coming */
1076 /* holders of db->lock must always block IRQs */
1078 rk2818_nand_status_mutex_lock();
1080 spin_lock_irqsave(&db->lock, flags);
1082 /* Save previous register address */
1083 reg_save = readb(db->io_addr);
1085 /* Disable all interrupts */
1086 iow(db, DM9000_IMR, IMR_PAR);
1088 /* Got DM9000 interrupt status */
1089 int_status = ior(db, DM9000_ISR); /* Got ISR */
1090 iow(db, DM9000_ISR, int_status); /* Clear ISR status */
1092 if (netif_msg_intr(db))
1093 dev_dbg(db->dev, "interrupt status %02x\n", int_status);
1095 /* Received the coming packet */
1096 if (int_status & ISR_PRS)
1099 /* Transmit Interrupt check */
1100 if (int_status & ISR_PTS)
1101 dm9000_tx_done(dev, db);
1103 if (db->type != TYPE_DM9000E) {
1104 if (int_status & ISR_LNKCHNG) {
1105 /* fire a link-change request */
1106 schedule_delayed_work(&db->phy_poll, 1);
1110 /* Re-enable interrupt mask */
1111 iow(db, DM9000_IMR, db->imr_all);
1113 /* Restore previous register address */
1114 writeb(reg_save, db->io_addr);
1116 spin_unlock_irqrestore(&db->lock, flags);
1118 rk2818_nand_status_mutex_unlock();
1120 enable_irq(dev->irq);
/* Hard-IRQ handler for the NAND-shared-bus build: can't touch the bus
 * here, so mask the line and defer the real work to dm9k_work. */
1124 static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
1126 struct net_device *dev = dev_id;
1127 board_info_t *db = netdev_priv(dev);
1129 //printk("enter : %s\n", __FUNCTION__);
1131 db->dev_id = dev_id;
1132 disable_irq_nosync(irq);
1133 queue_work(db->dm9000_wq, &(db->dm9k_work));
/* Hard-IRQ handler (non-NAND build): same sequence as the deferred
 * variant but executed directly — mask chip interrupts, read/ack ISR,
 * dispatch RX/TX/link-change, restore mask and address register. */
1138 static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
1140 struct net_device *dev = dev_id;
1141 board_info_t *db = netdev_priv(dev);
1143 unsigned long flags;
1146 dm9000_dbg(db, 3, "entering %s\n", __func__);
1148 /* A real interrupt coming */
1150 /* holders of db->lock must always block IRQs */
1151 spin_lock_irqsave(&db->lock, flags);
1153 /* Save previous register address */
1154 reg_save = readb(db->io_addr);
1156 /* Disable all interrupts */
1157 iow(db, DM9000_IMR, IMR_PAR);
1159 /* Got DM9000 interrupt status */
1160 int_status = ior(db, DM9000_ISR); /* Got ISR */
1161 iow(db, DM9000_ISR, int_status); /* Clear ISR status */
1163 if (netif_msg_intr(db))
1164 dev_dbg(db->dev, "interrupt status %02x\n", int_status);
1166 /* Received the coming packet */
1167 if (int_status & ISR_PRS)
1170 /* Transmit Interrupt check */
1171 if (int_status & ISR_PTS)
1172 dm9000_tx_done(dev, db);
1174 if (db->type != TYPE_DM9000E) {
1175 if (int_status & ISR_LNKCHNG) {
1176 /* fire a link-change request */
1177 schedule_delayed_work(&db->phy_poll, 1);
1181 /* Re-enable interrupt mask */
1182 iow(db, DM9000_IMR, db->imr_all);
1184 /* Restore previous register address */
1185 writeb(reg_save, db->io_addr);
1187 spin_unlock_irqrestore(&db->lock, flags);
1193 #ifdef CONFIG_NET_POLL_CONTROLLER
1197 static void dm9000_poll_controller(struct net_device *dev)
1199 disable_irq(dev->irq);
1200 dm9000_interrupt(dev->irq, dev);
1201 enable_irq(dev->irq);
1206 * Open the interface.
1207 * The interface is opened whenever "ifconfig" activates it.
/* ndo_open: set up the deferred-IRQ workqueue (NAND builds), request
 * the interrupt (hard-wired IRQF_TRIGGER_HIGH on non-RK2818MID
 * boards), initialise the chip, check media and start the queue. */
1210 dm9000_open(struct net_device *dev)
1212 board_info_t *db = netdev_priv(dev);
1213 unsigned long irqflags = db->irq_res->flags & IRQF_TRIGGER_MASK;
1215 #ifdef CONFIG_DM9000_USE_NAND_CONTROL
1216 db->dm9000_wq = create_workqueue("dm9000 wq");
1217 INIT_WORK(&(db->dm9k_work), dm9000_interrupt_work);
1220 if (netif_msg_ifup(db))
1221 dev_dbg(db->dev, "enabling %s\n", dev->name);
1223 /* If there is no IRQ type specified, default to something that
1224 * may work, and tell the user that this is a problem */
1226 if (irqflags == IRQF_TRIGGER_NONE)
1227 dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
1229 irqflags |= IRQF_SHARED;
1231 #ifndef CONFIG_MACH_RK2818MID
1232 if (request_irq(dev->irq, dm9000_interrupt, IRQF_TRIGGER_HIGH, dev->name, dev))
1235 if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
1239 /* Initialize DM9000 board */
1241 dm9000_init_dm9000(dev);
1243 /* Init driver variable */
1246 mii_check_media(&db->mii, netif_msg_link(db), 1);
1247 netif_start_queue(dev);
1249 dm9000_schedule_poll(db);
1255 * Sleep, either by using msleep() or if we are suspending, then
1256 * use mdelay() to sleep.
/* NOTE(review): the body (in_suspend branch) is missing from this
 * extraction — confirm against the full source. */
1258 static void dm9000_msleep(board_info_t *db, unsigned int ms)
1267 * Read a word from phyxcer
/* MII read: issue the PHY read command through EPAR/EPCR, wait ~1ms
 * for completion, then collect the result from EPDRH/EPDRL. Each
 * register burst saves/restores the address register and is bracketed
 * by the NAND-bus mutex and db->lock; addr_lock serialises against
 * EEPROM users. */
1270 dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
1272 board_info_t *db = netdev_priv(dev);
1273 unsigned long flags;
1274 unsigned int reg_save;
1277 mutex_lock(&db->addr_lock);
1279 rk2818_nand_status_mutex_lock();
1281 spin_lock_irqsave(&db->lock,flags);
1283 /* Save previous register address */
1284 reg_save = readb(db->io_addr);
1286 /* Fill the phyxcer register into REG_0C */
1287 iow(db, DM9000_EPAR, DM9000_PHY | reg);
1289 iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS); /* Issue phyxcer read command */
1291 writeb(reg_save, db->io_addr);
1292 spin_unlock_irqrestore(&db->lock,flags);
1294 rk2818_nand_status_mutex_unlock();
1296 dm9000_msleep(db, 1); /* Wait read complete */
1298 rk2818_nand_status_mutex_lock();
1300 spin_lock_irqsave(&db->lock,flags);
1302 reg_save = readb(db->io_addr);
1304 iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */
1306 /* The read data keeps on REG_0D & REG_0E */
1307 ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
1309 /* restore the previous address */
1310 writeb(reg_save, db->io_addr);
1311 spin_unlock_irqrestore(&db->lock,flags);
1313 rk2818_nand_status_mutex_unlock();
1315 mutex_unlock(&db->addr_lock);
1317 dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
1322 * Write a word to phyxcer
/* MII write: load EPDRL/EPDRH and issue the PHY write command, wait
 * ~1ms, then clear the command. Same locking scheme as
 * dm9000_phy_read(). */
1325 dm9000_phy_write(struct net_device *dev,
1326 int phyaddr_unused, int reg, int value)
1328 board_info_t *db = netdev_priv(dev);
1329 unsigned long flags;
1330 unsigned long reg_save;
1332 dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
1333 mutex_lock(&db->addr_lock);
1335 rk2818_nand_status_mutex_lock();
1337 spin_lock_irqsave(&db->lock,flags);
1339 /* Save previous register address */
1340 reg_save = readb(db->io_addr);
1342 /* Fill the phyxcer register into REG_0C */
1343 iow(db, DM9000_EPAR, DM9000_PHY | reg);
1345 /* Fill the written data into REG_0D & REG_0E */
1346 iow(db, DM9000_EPDRL, value);
1347 iow(db, DM9000_EPDRH, value >> 8);
1349 iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW); /* Issue phyxcer write command */
1351 writeb(reg_save, db->io_addr);
1352 spin_unlock_irqrestore(&db->lock, flags);
1354 rk2818_nand_status_mutex_unlock();
1356 dm9000_msleep(db, 1); /* Wait write complete */
1358 rk2818_nand_status_mutex_lock();
1360 spin_lock_irqsave(&db->lock,flags);
1362 reg_save = readb(db->io_addr);
1364 iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */
1366 /* restore the previous address */
1367 writeb(reg_save, db->io_addr);
1369 spin_unlock_irqrestore(&db->lock, flags);
1371 rk2818_nand_status_mutex_unlock();
1372 mutex_unlock(&db->addr_lock);
/* Quiesce the hardware: reset and power down the PHY, mask all chip
 * interrupts and disable the receiver. */
1376 dm9000_shutdown(struct net_device *dev)
1378 board_info_t *db = netdev_priv(dev);
1381 dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */
1382 iow(db, DM9000_GPR, 0x01); /* Power-Down PHY */
1383 iow(db, DM9000_IMR, IMR_PAR); /* Disable all interrupt */
1384 iow(db, DM9000_RCR, 0x00); /* Disable RX */
1388 * Stop the interface.
1389 * The interface is stopped when it is brought down.
/* ndo_stop: cancel the PHY poll worker, stop the queue and carrier,
 * release the IRQ, quiesce the chip, and tear down the deferred-IRQ
 * workqueue on NAND-shared-bus builds. */
1392 dm9000_stop(struct net_device *ndev)
1394 board_info_t *db = netdev_priv(ndev);
1396 if (netif_msg_ifdown(db))
1397 dev_dbg(db->dev, "shutting down %s\n", ndev->name);
1399 cancel_delayed_work_sync(&db->phy_poll);
1401 netif_stop_queue(ndev);
1402 netif_carrier_off(ndev);
1404 /* free interrupt */
1405 free_irq(ndev->irq, ndev);
1407 dm9000_shutdown(ndev);
1409 #ifdef CONFIG_DM9000_USE_NAND_CONTROL
1410 destroy_workqueue(db->dm9000_wq);
/* net_device operations table wiring the handlers above; generic
 * ethernet helpers cover MTU, address validation and MAC setting. */
1416 static const struct net_device_ops dm9000_netdev_ops = {
1417 .ndo_open = dm9000_open,
1418 .ndo_stop = dm9000_stop,
1419 .ndo_start_xmit = dm9000_start_xmit,
1420 .ndo_tx_timeout = dm9000_timeout,
1421 .ndo_set_multicast_list = dm9000_hash_table,
1422 .ndo_do_ioctl = dm9000_ioctl,
1423 .ndo_change_mtu = eth_change_mtu,
1424 .ndo_validate_addr = eth_validate_addr,
1425 .ndo_set_mac_address = eth_mac_addr,
1426 #ifdef CONFIG_NET_POLL_CONTROLLER
1427 .ndo_poll_controller = dm9000_poll_controller,
1432 * Search DM9000 board, allocate space and register it
/*
 * dm9000_probe - locate a DM9000 chip on the platform bus, map its two
 * register windows (address/index port and data port), wire up the IRQ
 * via board-specific Rockchip pin muxing, verify the chip ID, determine
 * the MAC address and register the net_device.
 */
1434 static int __devinit
1435 dm9000_probe(struct platform_device *pdev)
1437 struct dm9000_plat_data *pdata = pdev->dev.platform_data;
1438 struct board_info *db; /* Point a board information structure */
1439 struct net_device *ndev;
1440 const unsigned char *mac_src;
1446 /* Init network device */
1447 ndev = alloc_etherdev(sizeof(struct board_info));
1449 dev_err(&pdev->dev, "could not allocate device.\n");
1453 SET_NETDEV_DEV(ndev, &pdev->dev);
1455 dev_dbg(&pdev->dev, "dm9000_probe()\n");
1457 /* setup board info structure */
1458 db = netdev_priv(ndev);
1460 db->dev = &pdev->dev;
1463 spin_lock_init(&db->lock);
1464 mutex_init(&db->addr_lock);
/* Periodic PHY poll, used when the board runs without a link interrupt. */
1466 INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work);
/*
 * The DM9000 needs two memory resources: resource 0 is the address
 * (index) register window, resource 1 the data register window, plus
 * one interrupt line.
 */
1468 db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1469 db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1470 db->irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1472 if (db->addr_res == NULL || db->data_res == NULL ||
1473 db->irq_res == NULL) {
1474 dev_err(db->dev, "insufficient resources\n");
/* Claim and map the address register window. */
1479 iosize = resource_size(db->addr_res);
1480 db->addr_req = request_mem_region(db->addr_res->start, iosize,
1483 if (db->addr_req == NULL) {
1484 dev_err(db->dev, "cannot claim address reg area\n");
1489 db->io_addr = ioremap(db->addr_res->start, iosize);
1491 if (db->io_addr == NULL) {
1492 dev_err(db->dev, "failed to ioremap address reg\n");
/* Claim and map the data register window. */
1497 iosize = resource_size(db->data_res);
1498 db->data_req = request_mem_region(db->data_res->start, iosize,
1501 if (db->data_req == NULL) {
1502 dev_err(db->dev, "cannot claim data reg area\n");
1507 db->io_data = ioremap(db->data_res->start, iosize);
1509 if (db->io_data == NULL) {
1510 dev_err(db->dev, "failed to ioremap data reg\n");
1515 /* fill in parameters for net-dev structure */
1516 ndev->base_addr = (unsigned long)db->io_addr;
/*
 * Rockchip RK2818 board glue: route the chip-select and interrupt pins
 * through the SoC pin mux, then translate the interrupt GPIO into a
 * Linux IRQ number.
 */
1519 rk2818_mux_api_set(GPIOA5_FLASHCS1_SEL_NAME, IOMUXB_FLASH_CS1);
1521 #ifdef CONFIG_MACH_RK2818MID
1522 rk2818_mux_api_set(GPIOE_SPI1_FLASH_SEL1_NAME, IOMUXA_GPIO1_A12);
1523 ndev->irq = gpio_to_irq(db->irq_res->start);
1525 rk2818_mux_api_set(GPIOA1_HOSTDATA17_SEL_NAME, IOMUXB_GPIO0_A1);
1526 if (gpio_request(db->irq_res->start, "dm9000 interrupt")) {
/*
 * NOTE(review): calling gpio_free() on a GPIO whose gpio_request() just
 * failed looks wrong — the GPIO was never acquired by us.  Confirm the
 * intent (possibly releasing a stale claim from a bootloader/other
 * driver) before changing.
 */
1527 gpio_free(db->irq_res->start);
1528 dev_err(db->dev, "failed to request gpio\n");
1532 gpio_pull_updown(db->irq_res->start, GPIOPullDown);
1533 ndev->irq = gpio_to_irq(db->irq_res->start);
/* Board hook: let platform data power up / route the NIC pins. */
1537 if (pdata->net_gpio_set) {
1538 if (pdata->net_gpio_set()) {
1544 ndev->irq = gpio_to_irq(pdata->pin_int);
1546 /* ensure at least we have a default set of IO routines */
1547 dm9000_set_io(db, iosize);
1549 /* check to see if anything is being over-ridden */
1550 if (pdata != NULL) {
1551 /* check to see if the driver wants to over-ride the
1552 * default IO width */
1554 if (pdata->flags & DM9000_PLATF_8BITONLY)
1555 dm9000_set_io(db, 1);
1557 if (pdata->flags & DM9000_PLATF_16BITONLY)
1558 dm9000_set_io(db, 2);
1560 if (pdata->flags & DM9000_PLATF_32BITONLY)
1561 dm9000_set_io(db, 4);
1563 /* check to see if there are any IO routine
1566 if (pdata->inblk != NULL)
1567 db->inblk = pdata->inblk;
1569 if (pdata->outblk != NULL)
1570 db->outblk = pdata->outblk;
1572 if (pdata->dumpblk != NULL)
1573 db->dumpblk = pdata->dumpblk;
1575 db->flags = pdata->flags;
1578 #ifdef CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL
1579 db->flags |= DM9000_PLATF_SIMPLE_PHY;
1584 /* try multiple times, DM9000 sometimes gets the read wrong */
1585 for (i = 0; i < 8; i++) {
/* Assemble the 32-bit vendor/product ID from four 8-bit registers. */
1586 id_val = ior(db, DM9000_VIDL);
1587 id_val |= (u32)ior(db, DM9000_VIDH) << 8;
1588 id_val |= (u32)ior(db, DM9000_PIDL) << 16;
1589 id_val |= (u32)ior(db, DM9000_PIDH) << 24;
1591 if (id_val == DM9000_ID)
1593 dev_err(db->dev, "read wrong id 0x%08x\n", id_val);
1596 if (id_val != DM9000_ID) {
1597 dev_err(db->dev, "wrong id: 0x%08x\n", id_val);
1602 /* Identify what type of DM9000 we are working on */
1604 id_val = ior(db, DM9000_CHIPR);
1605 dev_dbg(db->dev, "dm9000 revision 0x%02x\n", id_val);
1609 db->type = TYPE_DM9000A;
1612 db->type = TYPE_DM9000B;
1615 dev_dbg(db->dev, "ID %02x => defaulting to DM9000E\n", id_val);
1616 db->type = TYPE_DM9000E;
1619 /* dm9000a/b are capable of hardware checksum offload */
1620 if (db->type == TYPE_DM9000A || db->type == TYPE_DM9000B) {
1623 ndev->features |= NETIF_F_IP_CSUM;
1626 /* from this point we assume that we have found a DM9000 */
1628 /* driver system function */
1631 ndev->netdev_ops = &dm9000_netdev_ops;
1632 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
1633 ndev->ethtool_ops = &dm9000_ethtool_ops;
1635 db->msg_enable = NETIF_MSG_LINK;
1636 db->mii.phy_id_mask = 0x1f;
1637 db->mii.reg_num_mask = 0x1f;
1638 db->mii.force_media = 0;
1639 db->mii.full_duplex = 0;
1641 db->mii.mdio_read = dm9000_phy_read;
1642 db->mii.mdio_write = dm9000_phy_write;
/*
 * MAC address selection, in priority order: attached EEPROM, then
 * platform data, then whatever the chip's PAR registers already hold.
 */
1646 /* try reading the node address from the attached EEPROM */
1647 for (i = 0; i < 6; i += 2)
1648 dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i);
1650 if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
1651 mac_src = "platform data";
1652 memcpy(ndev->dev_addr, pdata->dev_addr, 6);
1655 if (!is_valid_ether_addr(ndev->dev_addr)) {
1656 /* try reading from mac */
1659 for (i = 0; i < 6; i++)
1660 ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
1663 if (!is_valid_ether_addr(ndev->dev_addr))
1664 dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please "
1665 "set using ifconfig\n", ndev->name);
1667 platform_set_drvdata(pdev, ndev);
1668 ret = register_netdev(ndev);
/*
 * NOTE(review): mac_src is only assigned on the "platform data" path in
 * the lines visible here; its initial assignment (e.g. "eeprom") is
 * presumably on a line not shown — confirm it cannot reach this printk
 * uninitialized.
 */
1671 printk(KERN_INFO "%s: dm9000%c at %p,%p IRQ %d MAC: %pM (%s)\n",
1672 ndev->name, dm9000_type_to_char(db->type),
1673 db->io_addr, db->io_data, ndev->irq,
1674 ndev->dev_addr, mac_src);
/* Power the chip down until the interface is actually opened. */
1676 dm9000_shutdown(ndev);//add by lyx@20100713, reduce power consume
1681 dev_err(db->dev, "not found (%d).\n", ret);
/* Error path: undo region claims/mappings done above. */
1683 dm9000_release_board(pdev, db);
/*
 * dm9000_drv_suspend - system-sleep suspend hook.  If the interface is
 * up, detach it from the stack and power the chip down; dm9000_open()
 * state is restored by dm9000_drv_resume().
 */
1690 dm9000_drv_suspend(struct device *dev)
1692 struct platform_device *pdev = to_platform_device(dev);
1693 struct net_device *ndev = platform_get_drvdata(pdev);
1697 db = netdev_priv(ndev);
1700 if (netif_running(ndev)) {
/* Stop the stack queuing packets before cutting chip power. */
1701 netif_device_detach(ndev);
1702 dm9000_shutdown(ndev);
/*
 * dm9000_drv_resume - system-sleep resume hook.  If the interface was
 * running at suspend time, re-initialise the chip registers and
 * reattach the device so the stack resumes transmitting.
 */
1709 dm9000_drv_resume(struct device *dev)
1711 struct platform_device *pdev = to_platform_device(dev);
1712 struct net_device *ndev = platform_get_drvdata(pdev);
1713 board_info_t *db = netdev_priv(ndev);
1717 if (netif_running(ndev)) {
/* Chip lost its register state over suspend; reprogram it. */
1719 dm9000_init_dm9000(ndev);
1721 netif_device_attach(ndev);
/*
 * Legacy system-sleep callbacks only; no runtime-PM handlers are
 * provided.  NOTE(review): dev_pm_ops tables are conventionally const —
 * confirm nothing writes to this before adding the qualifier.
 */
1729 static struct dev_pm_ops dm9000_drv_pm_ops = {
1730 .suspend = dm9000_drv_suspend,
1731 .resume = dm9000_drv_resume,
/*
 * dm9000_drv_remove - platform-bus removal: unregister the net_device,
 * release the memory regions/mappings claimed in probe, and free the
 * device structure.
 */
1734 static int __devexit
1735 dm9000_drv_remove(struct platform_device *pdev)
1737 struct net_device *ndev = platform_get_drvdata(pdev);
1739 platform_set_drvdata(pdev, NULL);
/* Must unregister before tearing down the resources the netdev uses. */
1741 unregister_netdev(ndev);
1742 dm9000_release_board(pdev, (board_info_t *) netdev_priv(ndev));
1743 free_netdev(ndev); /* free device structure */
1745 dev_dbg(&pdev->dev, "released and freed device\n");
/*
 * Platform driver glue.  The driver name (matching CARDNAME for
 * platform-device binding) is set on lines not shown in this extract.
 */
1749 static struct platform_driver dm9000_driver = {
1752 .owner = THIS_MODULE,
1753 .pm = &dm9000_drv_pm_ops,
1755 .probe = dm9000_probe,
1756 .remove = __devexit_p(dm9000_drv_remove),
/* Module entry: announce the driver and register with the platform bus. */
1762 printk(KERN_INFO "%s Ethernet Driver, V%s\n", CARDNAME, DRV_VERSION);
1764 return platform_driver_register(&dm9000_driver);
/* Module exit: unregister the platform driver (triggers remove()). */
1768 dm9000_cleanup(void)
1770 platform_driver_unregister(&dm9000_driver);
/* Module entry/exit points and modinfo metadata. */
1773 module_init(dm9000_init);
1774 module_exit(dm9000_cleanup);
1776 MODULE_AUTHOR("Sascha Hauer, Ben Dooks");
1777 MODULE_DESCRIPTION("Davicom DM9000 network driver");
1778 MODULE_LICENSE("GPL");
/* Allows udev/modprobe to autoload on a "dm9000" platform device. */
1779 MODULE_ALIAS("platform:dm9000");