/*
 * linux/arch/arc/drivers/arcvmac.c
 *
 * Copyright (C) 2003-2006 Codito Technologies, for linux-2.4 port
 * Copyright (C) 2006-2007 Celunite Inc, for linux-2.6 port
 * Copyright (C) 2007-2008 Sagem Communications, Fehmi HAFSI
 * Copyright (C) 2009 Sagem Communications, Andreas Fenkart
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * external PHY support based on dnet.c
 * ring management based on bcm63xx_enet.c
 *
 * Authors: amit.bhor@celunite.com, sameer.dhavale@celunite.com
 */
32 #include <linux/clk.h>
\r
33 #include <linux/crc32.h>
\r
34 #include <linux/delay.h>
\r
35 #include <linux/dma-mapping.h>
\r
36 #include <linux/etherdevice.h>
\r
37 #include <linux/init.h>
\r
38 #include <linux/io.h>
\r
39 #include <linux/kernel.h>
\r
40 #include <linux/module.h>
\r
41 #include <linux/moduleparam.h>
\r
42 #include <linux/netdevice.h>
\r
43 #include <linux/phy.h>
\r
44 #include <linux/platform_device.h>
\r
45 #include <linux/slab.h>
\r
46 #include <linux/types.h>
\r
48 #include <mach/iomux.h>
\r
49 #include <mach/gpio.h>
\r
50 #include <mach/cru.h>
\r
51 #include <mach/board.h>
\r
52 #include "rk29_vmac.h"
\r
/* Register access macros: `reg` is a bare register name that is pasted
 * with the _OFFSET suffix declared in rk29_vmac.h. */
#define vmac_writel(port, value, reg)	\
	writel((value), (port)->regs + reg##_OFFSET)
#define vmac_readl(port, reg)	readl((port)->regs + reg##_OFFSET)
\r
59 static unsigned char *read_mac_reg(struct net_device *dev,
\r
60 unsigned char hwaddr[ETH_ALEN])
\r
62 struct vmac_priv *ap = netdev_priv(dev);
\r
63 unsigned mac_lo, mac_hi;
\r
66 mac_lo = vmac_readl(ap, ADDRL);
\r
67 mac_hi = vmac_readl(ap, ADDRH);
\r
69 hwaddr[0] = (mac_lo >> 0) & 0xff;
\r
70 hwaddr[1] = (mac_lo >> 8) & 0xff;
\r
71 hwaddr[2] = (mac_lo >> 16) & 0xff;
\r
72 hwaddr[3] = (mac_lo >> 24) & 0xff;
\r
73 hwaddr[4] = (mac_hi >> 0) & 0xff;
\r
74 hwaddr[5] = (mac_hi >> 8) & 0xff;
\r
78 static void write_mac_reg(struct net_device *dev, unsigned char* hwaddr)
\r
80 struct vmac_priv *ap = netdev_priv(dev);
\r
81 unsigned mac_lo, mac_hi;
\r
83 mac_lo = hwaddr[3] << 24 | hwaddr[2] << 16 | hwaddr[1] << 8 | hwaddr[0];
\r
84 mac_hi = hwaddr[5] << 8 | hwaddr[4];
\r
86 vmac_writel(ap, mac_lo, ADDRL);
\r
87 vmac_writel(ap, mac_hi, ADDRH);
\r
90 static void vmac_mdio_xmit(struct vmac_priv *ap, unsigned val)
\r
92 init_completion(&ap->mdio_complete);
\r
93 vmac_writel(ap, val, MDIO_DATA);
\r
94 wait_for_completion(&ap->mdio_complete);
\r
97 static int vmac_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
\r
99 struct vmac_priv *vmac = bus->priv;
\r
101 /* only 5 bits allowed for phy-addr and reg_offset */
\r
102 WARN_ON(phy_id & ~0x1f || phy_reg & ~0x1f);
\r
104 val = MDIO_BASE | MDIO_OP_READ;
\r
105 val |= phy_id << 23 | phy_reg << 18;
\r
106 vmac_mdio_xmit(vmac, val);
\r
108 val = vmac_readl(vmac, MDIO_DATA);
\r
109 return val & MDIO_DATA_MASK;
\r
112 static int vmac_mdio_write(struct mii_bus *bus, int phy_id, int phy_reg,
\r
115 struct vmac_priv *vmac = bus->priv;
\r
117 /* only 5 bits allowed for phy-addr and reg_offset */
\r
118 WARN_ON(phy_id & ~0x1f || phy_reg & ~0x1f);
\r
120 val = MDIO_BASE | MDIO_OP_WRITE;
\r
121 val |= phy_id << 23 | phy_reg << 18;
\r
122 val |= (value & MDIO_DATA_MASK);
\r
123 vmac_mdio_xmit(vmac, val);
\r
127 static void vmac_handle_link_change(struct net_device *dev)
\r
129 struct vmac_priv *ap = netdev_priv(dev);
\r
130 struct phy_device *phydev = ap->phy_dev;
\r
131 unsigned long flags;
\r
132 int report_change = 0;
\r
134 spin_lock_irqsave(&ap->lock, flags);
\r
136 if (phydev->duplex != ap->duplex) {
\r
139 tmp = vmac_readl(ap, CONTROL);
\r
141 if (phydev->duplex)
\r
146 vmac_writel(ap, tmp, CONTROL);
\r
148 ap->duplex = phydev->duplex;
\r
152 if (phydev->speed != ap->speed) {
\r
153 ap->speed = phydev->speed;
\r
157 if (phydev->link != ap->link) {
\r
158 ap->link = phydev->link;
\r
162 spin_unlock_irqrestore(&ap->lock, flags);
\r
165 phy_print_status(ap->phy_dev);
\r
168 static int __devinit vmac_mii_probe(struct net_device *dev)
\r
170 struct vmac_priv *ap = netdev_priv(dev);
\r
171 struct phy_device *phydev = NULL;
\r
172 struct clk *sys_clk;
\r
173 unsigned long clock_rate;
\r
176 /* find the first phy */
\r
177 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
\r
178 if (ap->mii_bus->phy_map[phy_addr]) {
\r
179 phydev = ap->mii_bus->phy_map[phy_addr];
\r
185 dev_err(&dev->dev, "no PHY found\n");
\r
189 /* add pin_irq, if avail */
\r
190 phydev = phy_connect(dev, dev_name(&phydev->dev),
\r
191 &vmac_handle_link_change, 0,
\r
192 //PHY_INTERFACE_MODE_MII);
\r
193 PHY_INTERFACE_MODE_RMII);//????????
\r
194 if (IS_ERR(phydev)) {
\r
195 err = PTR_ERR(phydev);
\r
196 dev_err(&dev->dev, "could not attach to PHY %d\n", err);
\r
200 phydev->supported &= PHY_BASIC_FEATURES;
\r
201 phydev->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause;
\r
204 sys_clk = clk_get(NULL, "mac_ref");////////
\r
205 if (IS_ERR(sys_clk)) {
\r
206 err = PTR_ERR(sys_clk);
\r
207 goto err_disconnect;
\r
210 clk_set_rate(sys_clk,50000000);
\r
211 clock_rate = clk_get_rate(sys_clk);
\r
214 printk("%s::%d --mac clock = %d\n",__func__, __LINE__, clock_rate);
\r
215 dev_dbg(&ap->pdev->dev, "clk_get: dev_name : %s %lu\n",
\r
216 dev_name(&ap->pdev->dev),
\r
219 if (clock_rate < 25000000)
\r
220 phydev->supported &= ~(SUPPORTED_100baseT_Half |
\r
221 SUPPORTED_100baseT_Full);
\r
224 phydev->advertising = phydev->supported;
\r
229 ap->phy_dev = phydev;
\r
234 phy_disconnect(phydev);
\r
239 static int __devinit vmac_mii_init(struct vmac_priv *ap)
\r
243 ap->mii_bus = mdiobus_alloc();
\r
245 if (ap->mii_bus == NULL)
\r
248 ap->mii_bus->name = "vmac_mii_bus";
\r
249 ap->mii_bus->read = &vmac_mdio_read;
\r
250 ap->mii_bus->write = &vmac_mdio_write;
\r
252 snprintf(ap->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);
\r
254 ap->mii_bus->priv = ap;
\r
257 ap->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
\r
258 if (!ap->mii_bus->irq)
\r
261 for (i = 0; i < PHY_MAX_ADDR; i++)
\r
262 ap->mii_bus->irq[i] = PHY_POLL;
\r
265 /* FIXME: what is it used for? */
\r
266 platform_set_drvdata(ap->dev, ap->mii_bus);
\r
269 err = mdiobus_register(ap->mii_bus);
\r
271 goto err_out_free_mdio_irq;
\r
273 err = vmac_mii_probe(ap->dev);
\r
275 goto err_out_unregister_bus;
\r
279 err_out_unregister_bus:
\r
280 mdiobus_unregister(ap->mii_bus);
\r
281 err_out_free_mdio_irq:
\r
282 kfree(ap->mii_bus->irq);
\r
284 mdiobus_free(ap->mii_bus);
\r
288 static void vmac_mii_exit(struct net_device *dev)
\r
290 struct vmac_priv *ap = netdev_priv(dev);
\r
293 phy_disconnect(ap->phy_dev);
\r
295 mdiobus_unregister(ap->mii_bus);
\r
296 kfree(ap->mii_bus->irq);
\r
297 mdiobus_free(ap->mii_bus);
\r
300 static int vmacether_get_settings(struct net_device *dev,
\r
301 struct ethtool_cmd *cmd)
\r
303 struct vmac_priv *ap = netdev_priv(dev);
\r
304 struct phy_device *phydev = ap->phy_dev;
\r
309 return phy_ethtool_gset(phydev, cmd);
\r
312 static int vmacether_set_settings(struct net_device *dev,
\r
313 struct ethtool_cmd *cmd)
\r
315 struct vmac_priv *ap = netdev_priv(dev);
\r
316 struct phy_device *phydev = ap->phy_dev;
\r
321 return phy_ethtool_sset(phydev, cmd);
\r
324 static int vmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
\r
326 struct vmac_priv *ap = netdev_priv(dev);
\r
327 struct phy_device *phydev = ap->phy_dev;
\r
329 if (!netif_running(dev))
\r
335 return phy_mii_ioctl(phydev, rq, cmd);
\r
338 static void vmacether_get_drvinfo(struct net_device *dev,
\r
339 struct ethtool_drvinfo *info)
\r
341 struct vmac_priv *ap = netdev_priv(dev);
\r
343 strlcpy(info->driver, VMAC_NAME, sizeof(info->driver));
\r
344 strlcpy(info->version, VMAC_VERSION, sizeof(info->version));
\r
345 snprintf(info->bus_info, sizeof(info->bus_info),
\r
346 "platform 0x%x", ap->mem_base);
\r
349 static int update_error_counters(struct net_device *dev, int status)
\r
351 struct vmac_priv *ap = netdev_priv(dev);
\r
352 dev_dbg(&ap->pdev->dev, "rx error counter overrun. status = 0x%x\n",
\r
355 /* programming error */
\r
356 WARN_ON(status & TXCH_MASK);
\r
357 WARN_ON(!(status & (MSER_MASK | RXCR_MASK | RXFR_MASK | RXFL_MASK)));
\r
359 if (status & MSER_MASK)
\r
360 ap->stats.rx_over_errors += 256; /* ran out of BD */
\r
361 if (status & RXCR_MASK)
\r
362 ap->stats.rx_crc_errors += 256;
\r
363 if (status & RXFR_MASK)
\r
364 ap->stats.rx_frame_errors += 256;
\r
365 if (status & RXFL_MASK)
\r
366 ap->stats.rx_fifo_errors += 256;
\r
371 static void update_tx_errors(struct net_device *dev, int status)
\r
373 struct vmac_priv *ap = netdev_priv(dev);
\r
376 ap->stats.tx_fifo_errors++;
\r
381 /* half duplex flags */
\r
383 ap->stats.tx_window_errors++;
\r
384 if (status & RETRY_CT)
\r
385 ap->stats.collisions += (status & RETRY_CT) >> 24;
\r
386 if (status & DROP) /* too many retries */
\r
387 ap->stats.tx_aborted_errors++;
\r
388 if (status & DEFER)
\r
389 dev_vdbg(&ap->pdev->dev, "\"defer to traffic\"\n");
\r
390 if (status & CARLOSS)
\r
391 ap->stats.tx_carrier_errors++;
\r
394 static int vmac_rx_reclaim_force(struct net_device *dev)
\r
396 struct vmac_priv *ap = netdev_priv(dev);
\r
401 dev_dbg(&ap->pdev->dev, "%s need to release %d rx sk_buff\n",
\r
402 __func__, fifo_used(&ap->rx_ring));
\r
404 while (!fifo_empty(&ap->rx_ring) && ct++ < ap->rx_ring.size) {
\r
405 struct vmac_buffer_desc *desc;
\r
406 struct sk_buff *skb;
\r
409 desc_idx = ap->rx_ring.tail;
\r
410 desc = &ap->rxbd[desc_idx];
\r
411 fifo_inc_tail(&ap->rx_ring);
\r
413 if (!ap->rx_skbuff[desc_idx]) {
\r
414 dev_err(&ap->pdev->dev, "non-populated rx_skbuff found %d\n",
\r
419 skb = ap->rx_skbuff[desc_idx];
\r
420 ap->rx_skbuff[desc_idx] = NULL;
\r
422 dma_unmap_single(&ap->pdev->dev, desc->data, skb->len,
\r
425 dev_kfree_skb(skb);
\r
428 if (!fifo_empty(&ap->rx_ring)) {
\r
429 dev_err(&ap->pdev->dev, "failed to reclaim %d rx sk_buff\n",
\r
430 fifo_used(&ap->rx_ring));
\r
436 static int vmac_rx_refill(struct net_device *dev)
\r
438 struct vmac_priv *ap = netdev_priv(dev);
\r
440 WARN_ON(fifo_full(&ap->rx_ring));
\r
442 while (!fifo_full(&ap->rx_ring)) {
\r
443 struct vmac_buffer_desc *desc;
\r
444 struct sk_buff *skb;
\r
448 desc_idx = ap->rx_ring.head;
\r
449 desc = &ap->rxbd[desc_idx];
\r
451 /* make sure we read the actual descriptor status */
\r
454 if (ap->rx_skbuff[desc_idx]) {
\r
455 /* dropped packet / buffer chaining */
\r
456 fifo_inc_head(&ap->rx_ring);
\r
458 /* return to DMA */
\r
460 desc->info = OWN_MASK | ap->rx_skb_size;
\r
464 skb = netdev_alloc_skb(dev, ap->rx_skb_size + 2);
\r
466 dev_info(&ap->pdev->dev, "failed to allocate rx_skb, skb's left %d\n",
\r
467 fifo_used(&ap->rx_ring));
\r
471 /* IP header Alignment (14 byte Ethernet header) */
\r
472 skb_reserve(skb, 2);
\r
473 WARN_ON(skb->len != 0); /* nothing received yet */
\r
475 ap->rx_skbuff[desc_idx] = skb;
\r
477 p = dma_map_single(&ap->pdev->dev, skb->data, ap->rx_skb_size,
\r
483 desc->info = OWN_MASK | ap->rx_skb_size;
\r
485 fifo_inc_head(&ap->rx_ring);
\r
488 /* If rx ring is still empty, set a timer to try allocating
\r
489 * again at a later time. */
\r
490 if (fifo_empty(&ap->rx_ring) && netif_running(dev)) {
\r
491 dev_warn(&ap->pdev->dev, "unable to refill rx ring\n");
\r
492 ap->rx_timeout.expires = jiffies + HZ;
\r
493 add_timer(&ap->rx_timeout);
\r
500 * timer callback to defer refill rx queue in case we're OOM
\r
502 static void vmac_refill_rx_timer(unsigned long data)
\r
504 struct net_device *dev;
\r
505 struct vmac_priv *ap;
\r
507 dev = (struct net_device *)data;
\r
508 ap = netdev_priv(dev);
\r
510 spin_lock(&ap->rx_lock);
\r
511 vmac_rx_refill(dev);
\r
512 spin_unlock(&ap->rx_lock);
\r
515 /* merge buffer chaining */
\r
516 struct sk_buff *vmac_merge_rx_buffers(struct net_device *dev,
\r
517 struct vmac_buffer_desc *after,
\r
518 int pkt_len) /* data */
\r
520 struct vmac_priv *ap = netdev_priv(dev);
\r
521 struct sk_buff *merge_skb, *cur_skb;
\r
522 struct dma_fifo *rx_ring;
\r
523 struct vmac_buffer_desc *desc;
\r
525 rx_ring = &ap->rx_ring;
\r
526 desc = &ap->rxbd[rx_ring->tail];
\r
528 WARN_ON(desc == after);
\r
533 /* IP header Alignment (14 byte Ethernet header) */
\r
534 merge_skb = netdev_alloc_skb(dev, pkt_len + 2);
\r
536 dev_err(&ap->pdev->dev, "failed to allocate merged rx_skb, rx skb's left %d\n",
\r
537 fifo_used(rx_ring));
\r
542 skb_reserve(merge_skb, 2);
\r
544 while (desc != after && pkt_len) {
\r
545 struct vmac_buffer_desc *desc;
\r
546 int buf_len, valid;
\r
548 /* desc needs wrapping */
\r
549 desc = &ap->rxbd[rx_ring->tail];
\r
550 cur_skb = ap->rx_skbuff[rx_ring->tail];
\r
553 dma_unmap_single(&ap->pdev->dev, desc->data, ap->rx_skb_size,
\r
556 /* do not copy FCS */
\r
557 buf_len = desc->info & LEN_MASK;
\r
558 valid = min(pkt_len, buf_len);
\r
561 memcpy(skb_put(merge_skb, valid), cur_skb->data, valid);
\r
563 fifo_inc_tail(rx_ring);
\r
566 /* merging_pressure++ */
\r
568 if (unlikely(pkt_len != 0))
\r
569 dev_err(&ap->pdev->dev, "buffer chaining bytes missing %d\n",
\r
572 WARN_ON(desc != after);
\r
577 int vmac_rx_receive(struct net_device *dev, int budget)
\r
579 struct vmac_priv *ap = netdev_priv(dev);
\r
580 struct vmac_buffer_desc *first;
\r
581 int processed, pkt_len, pkt_err;
\r
582 struct dma_fifo lookahead;
\r
587 pkt_err = pkt_len = 0;
\r
589 /* look ahead, till packet complete */
\r
590 lookahead = ap->rx_ring;
\r
593 struct vmac_buffer_desc *desc; /* cur_ */
\r
594 int desc_idx; /* cur_ */
\r
595 struct sk_buff *skb; /* pkt_ */
\r
597 desc_idx = lookahead.tail;
\r
598 desc = &ap->rxbd[desc_idx];
\r
600 /* make sure we read the actual descriptor status */
\r
603 /* break if dma ownership belongs to hw */
\r
604 if (desc->info & OWN_MASK) {
\r
605 ap->mac_rxring_head = vmac_readl(ap, MAC_RXRING_HEAD);
\r
609 if (desc->info & FRST_MASK) {
\r
613 /* don't free current */
\r
614 ap->rx_ring.tail = lookahead.tail;
\r
618 fifo_inc_tail(&lookahead);
\r
622 pkt_len += desc->info & LEN_MASK;
\r
623 pkt_err |= (desc->info & BUFF);
\r
625 if (!(desc->info & LAST_MASK))
\r
628 /* received complete packet */
\r
630 if (unlikely(pkt_err || !first)) {
\r
631 /* recycle buffers */
\r
632 ap->rx_ring.tail = lookahead.tail;
\r
636 WARN_ON(!(first->info & FRST_MASK) ||
\r
637 !(desc->info & LAST_MASK));
\r
640 /* -- valid packet -- */
\r
642 if (first != desc) {
\r
643 skb = vmac_merge_rx_buffers(dev, desc, pkt_len);
\r
647 ap->rx_ring.tail = lookahead.tail;
\r
648 ap->rx_merge_error++;
\r
652 dma_unmap_single(&ap->pdev->dev, desc->data,
\r
653 ap->rx_skb_size, DMA_FROM_DEVICE);
\r
655 skb = ap->rx_skbuff[desc_idx];
\r
656 ap->rx_skbuff[desc_idx] = NULL;
\r
657 /* desc->data != skb->data => desc->data DMA mapped */
\r
660 skb_put(skb, pkt_len - 4);
\r
664 ap->rx_ring.tail = lookahead.tail;
\r
666 WARN_ON(skb->len != pkt_len - 4);
\r
669 skb->protocol = eth_type_trans(skb, dev);
\r
670 ap->stats.rx_packets++;
\r
671 ap->stats.rx_bytes += skb->len;
\r
672 dev->last_rx = jiffies;
\r
675 } while (!fifo_empty(&lookahead) && (processed < budget));
\r
677 dev_vdbg(&ap->pdev->dev, "processed pkt %d, remaining rx buff %d\n",
\r
679 fifo_used(&ap->rx_ring));
\r
681 if (processed || fifo_empty(&ap->rx_ring))
\r
682 vmac_rx_refill(dev);
\r
687 static void vmac_toggle_irqmask(struct net_device *dev, int enable, int mask)
\r
689 struct vmac_priv *ap = netdev_priv(dev);
\r
692 tmp = vmac_readl(ap, ENABLE);
\r
697 vmac_writel(ap, tmp, ENABLE);
\r
700 static void vmac_toggle_txint(struct net_device *dev, int enable)
\r
702 struct vmac_priv *ap = netdev_priv(dev);
\r
703 unsigned long flags;
\r
705 spin_lock_irqsave(&ap->lock, flags);
\r
706 vmac_toggle_irqmask(dev, enable, TXINT_MASK);
\r
707 spin_unlock_irqrestore(&ap->lock, flags);
\r
710 static void vmac_toggle_rxint(struct net_device *dev, int enable)
\r
712 vmac_toggle_irqmask(dev, enable, RXINT_MASK);
\r
715 static int vmac_poll(struct napi_struct *napi, int budget)
\r
717 struct vmac_priv *ap;
\r
718 struct net_device *dev;
\r
720 unsigned long flags;
\r
722 ap = container_of(napi, struct vmac_priv, napi);
\r
725 /* ack interrupt */
\r
726 vmac_writel(ap, RXINT_MASK, STAT);
\r
728 spin_lock(&ap->rx_lock);
\r
729 rx_work_done = vmac_rx_receive(dev, budget);
\r
730 spin_unlock(&ap->rx_lock);
\r
732 #ifdef VERBOSE_DEBUG
\r
733 if (printk_ratelimit()) {
\r
734 dev_vdbg(&ap->pdev->dev, "poll budget %d receive rx_work_done %d\n",
\r
740 if (rx_work_done >= budget) {
\r
741 /* rx queue is not yet empty/clean */
\r
742 return rx_work_done;
\r
745 /* no more packet in rx/tx queue, remove device from poll
\r
747 spin_lock_irqsave(&ap->lock, flags);
\r
748 napi_complete(napi);
\r
749 vmac_toggle_rxint(dev, 1);
\r
750 spin_unlock_irqrestore(&ap->lock, flags);
\r
752 return rx_work_done;
\r
755 static int vmac_tx_reclaim(struct net_device *dev, int force);
\r
757 static irqreturn_t vmac_intr(int irq, void *dev_instance)
\r
759 struct net_device *dev = dev_instance;
\r
760 struct vmac_priv *ap = netdev_priv(dev);
\r
761 unsigned int status;
\r
763 spin_lock(&ap->lock);
\r
765 status = vmac_readl(ap, STAT);
\r
766 vmac_writel(ap, status, STAT);
\r
769 if (unlikely(ap->shutdown))
\r
770 dev_err(&ap->pdev->dev, "ISR during close\n");
\r
772 if (unlikely(!status & (RXINT_MASK|MDIO_MASK|ERR_MASK)))
\r
773 dev_err(&ap->pdev->dev, "No source of IRQ found\n");
\r
776 if ((status & RXINT_MASK) &&
\r
777 (ap->mac_rxring_head !=
\r
778 vmac_readl(ap, MAC_RXRING_HEAD))) {
\r
779 vmac_toggle_rxint(dev, 0);
\r
780 napi_schedule(&ap->napi);
\r
783 if (unlikely(netif_queue_stopped(dev) && (status & TXINT_MASK)))
\r
784 vmac_tx_reclaim(dev, 0);
\r
786 if (status & MDIO_MASK)
\r
787 complete(&ap->mdio_complete);
\r
789 if (unlikely(status & ERR_MASK))
\r
790 update_error_counters(dev, status);
\r
792 spin_unlock(&ap->lock);
\r
794 return IRQ_HANDLED;
\r
797 static int vmac_tx_reclaim(struct net_device *dev, int force)
\r
799 struct vmac_priv *ap = netdev_priv(dev);
\r
802 /* buffer chaining not used, see vmac_start_xmit */
\r
804 while (!fifo_empty(&ap->tx_ring)) {
\r
805 struct vmac_buffer_desc *desc;
\r
806 struct sk_buff *skb;
\r
809 desc_idx = ap->tx_ring.tail;
\r
810 desc = &ap->txbd[desc_idx];
\r
812 /* ensure other field of the descriptor were not read
\r
813 * before we checked ownership */
\r
816 if ((desc->info & OWN_MASK) && !force)
\r
819 if (desc->info & ERR_MSK_TX) {
\r
820 update_tx_errors(dev, desc->info);
\r
821 /* recycle packet, let upper level deal with it */
\r
824 skb = ap->tx_skbuff[desc_idx];
\r
825 ap->tx_skbuff[desc_idx] = NULL;
\r
828 dma_unmap_single(&ap->pdev->dev, desc->data, skb->len,
\r
831 dev_kfree_skb_any(skb);
\r
834 fifo_inc_tail(&ap->tx_ring);
\r
837 if (netif_queue_stopped(dev) && released) {
\r
838 netif_wake_queue(dev);
\r
839 vmac_toggle_txint(dev, 0);
\r
842 if (unlikely(force && !fifo_empty(&ap->tx_ring))) {
\r
843 dev_err(&ap->pdev->dev, "failed to reclaim %d tx sk_buff\n",
\r
844 fifo_used(&ap->tx_ring));
\r
850 int vmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
\r
852 struct vmac_priv *ap = netdev_priv(dev);
\r
853 struct vmac_buffer_desc *desc;
\r
856 /* running under xmit lock */
\r
858 /* no scatter/gatter see features below */
\r
859 WARN_ON(skb_shinfo(skb)->nr_frags != 0);
\r
860 WARN_ON(skb->len > MAX_TX_BUFFER_LEN);
\r
862 if (unlikely(fifo_full(&ap->tx_ring))) {
\r
863 netif_stop_queue(dev);
\r
864 vmac_toggle_txint(dev, 1);
\r
865 dev_err(&ap->pdev->dev, "xmit called with no tx desc available\n");
\r
866 return NETDEV_TX_BUSY;
\r
869 if (unlikely(skb->len < ETH_ZLEN)) {
\r
870 struct sk_buff *short_skb;
\r
871 short_skb = netdev_alloc_skb(dev, ETH_ZLEN);
\r
873 return NETDEV_TX_LOCKED;
\r
875 memset(short_skb->data, 0, ETH_ZLEN);
\r
876 memcpy(skb_put(short_skb, ETH_ZLEN), skb->data, skb->len);
\r
877 dev_kfree_skb(skb);
\r
881 /* fill descriptor */
\r
882 ap->tx_skbuff[ap->tx_ring.head] = skb;
\r
884 desc = &ap->txbd[ap->tx_ring.head];
\r
885 desc->data = dma_map_single(&ap->pdev->dev, skb->data, skb->len,
\r
888 /* dma might already be polling */
\r
890 desc->info = OWN_MASK | FRST_MASK | LAST_MASK | skb->len;
\r
894 tmp = vmac_readl(ap, STAT);
\r
895 vmac_writel(ap, tmp | TXPL_MASK, STAT);
\r
897 ap->stats.tx_packets++;
\r
898 ap->stats.tx_bytes += skb->len;
\r
899 dev->trans_start = jiffies;
\r
900 fifo_inc_head(&ap->tx_ring);
\r
902 /* vmac_tx_reclaim independent of vmac_tx_timeout */
\r
903 if (fifo_used(&ap->tx_ring) > 8)
\r
904 vmac_tx_reclaim(dev, 0);
\r
906 /* stop queue if no more desc available */
\r
907 if (fifo_full(&ap->tx_ring)) {
\r
908 netif_stop_queue(dev);
\r
909 vmac_toggle_txint(dev, 1);
\r
912 return NETDEV_TX_OK;
\r
915 static int alloc_buffers(struct net_device *dev)
\r
917 struct vmac_priv *ap = netdev_priv(dev);
\r
921 fifo_init(&ap->rx_ring, RX_BDT_LEN);
\r
922 fifo_init(&ap->tx_ring, TX_BDT_LEN);
\r
924 /* initialize skb list */
\r
925 memset(ap->rx_skbuff, 0, sizeof(ap->rx_skbuff));
\r
926 memset(ap->tx_skbuff, 0, sizeof(ap->tx_skbuff));
\r
928 /* allocate DMA received descriptors */
\r
929 size = sizeof(*ap->rxbd) * ap->rx_ring.size;
\r
930 ap->rxbd = dma_alloc_coherent(&ap->pdev->dev, size,
\r
933 if (ap->rxbd == NULL)
\r
936 /* allocate DMA transmit descriptors */
\r
937 size = sizeof(*ap->txbd) * ap->tx_ring.size;
\r
938 ap->txbd = dma_alloc_coherent(&ap->pdev->dev, size,
\r
941 if (ap->txbd == NULL)
\r
942 goto err_free_rxbd;
\r
944 /* ensure 8-byte aligned */
\r
945 WARN_ON(((int)ap->txbd & 0x7) || ((int)ap->rxbd & 0x7));
\r
947 memset(ap->txbd, 0, sizeof(*ap->txbd) * ap->tx_ring.size);
\r
948 memset(ap->rxbd, 0, sizeof(*ap->rxbd) * ap->rx_ring.size);
\r
950 /* allocate rx skb */
\r
951 err = vmac_rx_refill(dev);
\r
953 goto err_free_txbd;
\r
958 dma_free_coherent(&ap->pdev->dev, sizeof(*ap->txbd) * ap->tx_ring.size,
\r
959 ap->txbd, ap->txbd_dma);
\r
961 dma_free_coherent(&ap->pdev->dev, sizeof(*ap->rxbd) * ap->rx_ring.size,
\r
962 ap->rxbd, ap->rxbd_dma);
\r
967 static int free_buffers(struct net_device *dev)
\r
969 struct vmac_priv *ap = netdev_priv(dev);
\r
972 vmac_tx_reclaim(dev, 1);
\r
973 vmac_rx_reclaim_force(dev);
\r
975 /* free DMA ring */
\r
976 dma_free_coherent(&ap->pdev->dev, sizeof(ap->txbd) * ap->tx_ring.size,
\r
977 ap->txbd, ap->txbd_dma);
\r
978 dma_free_coherent(&ap->pdev->dev, sizeof(ap->rxbd) * ap->rx_ring.size,
\r
979 ap->rxbd, ap->rxbd_dma);
\r
984 static int vmac_hw_init(struct net_device *dev)
\r
986 struct vmac_priv *priv = netdev_priv(dev);
\r
988 /* clear IRQ mask */
\r
989 vmac_writel(priv, 0, ENABLE);
\r
991 /* clear pending IRQ */
\r
992 vmac_writel(priv, 0xffffffff, STAT);
\r
994 /* Initialize logical address filter */
\r
995 vmac_writel(priv, 0x0, LAFL);
\r
996 vmac_writel(priv, 0x0, LAFH);
\r
1002 static int vmac_register_print(struct net_device *dev)
\r
1004 struct vmac_priv *ap = netdev_priv(dev);
\r
1006 printk("func::%s vmac register %s value = 0x%x\n", __func__, "ID", vmac_readl(ap, ID));
\r
1007 printk("func::%s vmac register %s value = 0x%x\n", __func__, "STAT", vmac_readl(ap, STAT));
\r
1008 printk("func::%s vmac register %s value = 0x%x\n", __func__, "ENABLE", vmac_readl(ap, ENABLE));
\r
1009 printk("func::%s vmac register %s value = 0x%x\n", __func__, "CONTROL", vmac_readl(ap, CONTROL));
\r
1010 printk("func::%s vmac register %s value = 0x%x\n", __func__, "ADDRL", vmac_readl(ap, ADDRL));
\r
1011 printk("func::%s vmac register %s value = 0x%x\n", __func__, "ADDRH", vmac_readl(ap, ADDRH));
\r
1017 int vmac_open(struct net_device *dev)
\r
1019 struct vmac_priv *ap = netdev_priv(dev);
\r
1020 struct phy_device *phydev;
\r
1021 unsigned int temp;
\r
1029 vmac_hw_init(dev);
\r
1031 /* mac address changed? */
\r
1032 write_mac_reg(dev, dev->dev_addr);
\r
1034 err = alloc_buffers(dev);
\r
1038 err = request_irq(dev->irq, &vmac_intr, 0, dev->name, dev);
\r
1040 dev_err(&ap->pdev->dev, "Unable to request IRQ %d (error %d)\n",
\r
1042 goto err_free_buffers;
\r
1045 /* install DMA ring pointers */
\r
1046 vmac_writel(ap, ap->rxbd_dma, RXRINGPTR);
\r
1047 vmac_writel(ap, ap->txbd_dma, TXRINGPTR);
\r
1049 /* set poll rate to 1 ms */
\r
1050 vmac_writel(ap, POLLRATE_TIME, POLLRATE);
\r
1052 /* make sure we enable napi before rx interrupt */
\r
1053 napi_enable(&ap->napi);
\r
1056 temp = RXINT_MASK | ERR_MASK | TXCH_MASK | MDIO_MASK;
\r
1057 vmac_writel(ap, temp, ENABLE);
\r
1060 temp = (RX_BDT_LEN << 24) | (TX_BDT_LEN << 16) | TXRN_MASK | RXRN_MASK;
\r
1061 vmac_writel(ap, temp, CONTROL);
\r
1063 /* enable, after all other bits are set */
\r
1064 vmac_writel(ap, temp | EN_MASK, CONTROL);
\r
1066 netif_start_queue(dev);
\r
1067 netif_carrier_off(dev);
\r
1070 vmac_register_print(dev);
\r
1073 /* register the PHY board fixup, if needed */
\r
1074 err = vmac_mii_init(ap);
\r
1076 goto err_free_irq;
\r
1078 /* schedule a link state check */
\r
1079 phy_start(ap->phy_dev);
\r
1081 phydev = ap->phy_dev;
\r
1082 dev_info(&ap->pdev->dev, "PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
\r
1083 phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
\r
1088 free_irq(dev->irq, dev);
\r
1090 free_buffers(dev);
\r
1095 int vmac_close(struct net_device *dev)
\r
1097 struct vmac_priv *ap = netdev_priv(dev);
\r
1098 unsigned int temp;
\r
1100 netif_stop_queue(dev);
\r
1101 napi_disable(&ap->napi);
\r
1103 /* stop running transfers */
\r
1104 temp = vmac_readl(ap, CONTROL);
\r
1105 temp &= ~(TXRN_MASK | RXRN_MASK);
\r
1106 vmac_writel(ap, temp, CONTROL);
\r
1108 del_timer_sync(&ap->rx_timeout);
\r
1111 phy_stop(ap->phy_dev);
\r
1112 vmac_mii_exit(dev);
\r
1113 netif_carrier_off(dev);
\r
1115 /* disable interrupts */
\r
1116 vmac_writel(ap, 0, ENABLE);
\r
1117 free_irq(dev->irq, dev);
\r
1119 /* turn off vmac */
\r
1120 vmac_writel(ap, 0, CONTROL);
\r
1121 /* vmac_reset_hw(vmac) */
\r
1126 free_buffers(dev);
\r
1131 void vmac_update_stats(struct vmac_priv *ap)
\r
1133 struct net_device_stats *_stats = &ap->stats;
\r
1134 unsigned long miss, rxerr;
\r
1135 unsigned long rxfram, rxcrc, rxoflow;
\r
1137 /* compare with /proc/net/dev,
\r
1138 * see net/core/dev.c:dev_seq_printf_stats */
\r
1141 rxerr = vmac_readl(ap, RXERR);
\r
1142 miss = vmac_readl(ap, MISS);
\r
1144 rxcrc = (rxerr & RXERR_CRC);
\r
1145 rxfram = (rxerr & RXERR_FRM) >> 8;
\r
1146 rxoflow = (rxerr & RXERR_OFLO) >> 16;
\r
1148 _stats->rx_length_errors = 0;
\r
1149 _stats->rx_over_errors += miss;
\r
1150 _stats->rx_crc_errors += rxcrc;
\r
1151 _stats->rx_frame_errors += rxfram;
\r
1152 _stats->rx_fifo_errors += rxoflow;
\r
1153 _stats->rx_missed_errors = 0;
\r
1155 /* TODO check rx_dropped/rx_errors/tx_dropped/tx_errors have not
\r
1156 * been updated elsewhere */
\r
1157 _stats->rx_dropped = _stats->rx_over_errors +
\r
1158 _stats->rx_fifo_errors +
\r
1159 ap->rx_merge_error;
\r
1161 _stats->rx_errors = _stats->rx_length_errors + _stats->rx_crc_errors +
\r
1162 _stats->rx_frame_errors +
\r
1163 _stats->rx_missed_errors +
\r
1164 _stats->rx_dropped;
\r
1167 _stats->tx_dropped = 0; /* otherwise queue stopped */
\r
1169 _stats->tx_errors = _stats->tx_aborted_errors +
\r
1170 _stats->tx_carrier_errors +
\r
1171 _stats->tx_fifo_errors +
\r
1172 _stats->tx_heartbeat_errors +
\r
1173 _stats->tx_window_errors +
\r
1174 _stats->tx_dropped +
\r
1175 ap->tx_timeout_error;
\r
1178 struct net_device_stats *vmac_stats(struct net_device *dev)
\r
1180 struct vmac_priv *ap = netdev_priv(dev);
\r
1181 unsigned long flags;
\r
1183 spin_lock_irqsave(&ap->lock, flags);
\r
1184 vmac_update_stats(ap);
\r
1185 spin_unlock_irqrestore(&ap->lock, flags);
\r
1187 return &ap->stats;
\r
1190 void vmac_tx_timeout(struct net_device *dev)
\r
1192 struct vmac_priv *ap = netdev_priv(dev);
\r
1193 unsigned int status;
\r
1194 unsigned long flags;
\r
1196 spin_lock_irqsave(&ap->lock, flags);
\r
1198 /* queue did not progress for timeo jiffies */
\r
1199 WARN_ON(!netif_queue_stopped(dev));
\r
1200 WARN_ON(!fifo_full(&ap->tx_ring));
\r
1202 /* TX IRQ lost? */
\r
1203 status = vmac_readl(ap, STAT);
\r
1204 if (status & TXINT_MASK) {
\r
1205 dev_err(&ap->pdev->dev, "lost tx interrupt, IRQ mask %x\n",
\r
1206 vmac_readl(ap, ENABLE));
\r
1207 vmac_writel(ap, TXINT_MASK, STAT);
\r
1210 /* TODO RX/MDIO/ERR as well? */
\r
1212 vmac_tx_reclaim(dev, 0);
\r
1213 if (fifo_full(&ap->tx_ring))
\r
1214 dev_err(&ap->pdev->dev, "DMA state machine not active\n");
\r
1216 /* We can accept TX packets again */
\r
1217 ap->tx_timeout_error++;
\r
1218 dev->trans_start = jiffies;
\r
1219 netif_wake_queue(dev);
\r
1221 spin_unlock_irqrestore(&ap->lock, flags);
\r
1224 static void create_multicast_filter(struct net_device *dev,
\r
1225 unsigned long *bitmask)
\r
1228 struct netdev_hw_addr *ha;
\r
1229 unsigned long crc;
\r
1231 struct netdev_hw_addr_list *list = &dev->dev_addrs;
\r
1233 printk("-----------------func %s-------------------\n", __func__);
\r
1235 WARN_ON(dev->mc_count == 0);
\r
1236 WARN_ON(dev->flags & IFF_ALLMULTI);
\r
1238 bitmask[0] = bitmask[1] = 0;
\r
1240 list_for_each_entry(ha, &list->list, list) {
\r
1243 /* skip non-multicast addresses */
\r
1244 if (!(*addrs & 1))
\r
1247 crc = ether_crc_le(ETH_ALEN, addrs);
\r
1248 set_bit(crc >> 26, bitmask);
\r
1252 struct netdev_hw_addr *ha;
\r
1253 unsigned long crc;
\r
1256 WARN_ON(netdev_mc_count(dev) == 0);
\r
1257 WARN_ON(dev->flags & IFF_ALLMULTI);
\r
1259 bitmask[0] = bitmask[1] = 0;
\r
1261 netdev_for_each_mc_addr(ha, dev) {
\r
1264 /* skip non-multicast addresses */
\r
1265 if (!(*addrs & 1))
\r
1268 crc = ether_crc_le(ETH_ALEN, addrs);
\r
1269 set_bit(crc >> 26, bitmask);
\r
1273 static void vmac_set_multicast_list(struct net_device *dev)
\r
1276 struct vmac_priv *ap = netdev_priv(dev);
\r
1277 unsigned long flags, bitmask[2];
\r
1280 printk("-----------------func %s-------------------\n", __func__);
\r
1282 spin_lock_irqsave(&ap->lock, flags);
\r
1284 promisc = !!(dev->flags & IFF_PROMISC);
\r
1285 reg = vmac_readl(ap, ENABLE);
\r
1286 if (promisc != !!(reg & PROM_MASK)) {
\r
1288 vmac_writel(ap, reg, ENABLE);
\r
1291 if (dev->flags & IFF_ALLMULTI)
\r
1292 memset(bitmask, 1, sizeof(bitmask));
\r
1293 else if (dev->mc_count == 0)
\r
1294 memset(bitmask, 0, sizeof(bitmask));
\r
1296 create_multicast_filter(dev, bitmask);
\r
1298 vmac_writel(ap, bitmask[0], LAFL);
\r
1299 vmac_writel(ap, bitmask[1], LAFH);
\r
1301 spin_unlock_irqrestore(&ap->lock, flags);
\r
1303 struct vmac_priv *ap = netdev_priv(dev);
\r
1304 unsigned long flags, bitmask[2];
\r
1307 spin_lock_irqsave(&ap->lock, flags);
\r
1309 promisc = !!(dev->flags & IFF_PROMISC);
\r
1310 reg = vmac_readl(ap, ENABLE);
\r
1311 if (promisc != !!(reg & PROM_MASK)) {
\r
1313 vmac_writel(ap, reg, ENABLE);
\r
1316 if (dev->flags & IFF_ALLMULTI)
\r
1317 memset(bitmask, 1, sizeof(bitmask));
\r
1318 else if (netdev_mc_count(dev) == 0)
\r
1319 memset(bitmask, 0, sizeof(bitmask));
\r
1321 create_multicast_filter(dev, bitmask);
\r
1323 vmac_writel(ap, bitmask[0], LAFL);
\r
1324 vmac_writel(ap, bitmask[1], LAFH);
\r
1326 spin_unlock_irqrestore(&ap->lock, flags);
\r
1330 static struct ethtool_ops vmac_ethtool_ops = {
\r
1331 .get_settings = vmacether_get_settings,
\r
1332 .set_settings = vmacether_set_settings,
\r
1333 .get_drvinfo = vmacether_get_drvinfo,
\r
1334 .get_link = ethtool_op_get_link,
\r
1337 static const struct net_device_ops vmac_netdev_ops = {
\r
1338 .ndo_open = vmac_open,
\r
1339 .ndo_stop = vmac_close,
\r
1340 .ndo_get_stats = vmac_stats,
\r
1341 .ndo_start_xmit = vmac_start_xmit,
\r
1342 .ndo_do_ioctl = vmac_ioctl,
\r
1343 .ndo_set_mac_address = eth_mac_addr,
\r
1344 .ndo_tx_timeout = vmac_tx_timeout,
\r
1345 .ndo_set_multicast_list = vmac_set_multicast_list,
\r
1346 .ndo_validate_addr = eth_validate_addr,
\r
1347 .ndo_change_mtu = eth_change_mtu,
\r
1350 static int __devinit vmac_probe(struct platform_device *pdev)
\r
1352 struct net_device *dev;
\r
1353 struct vmac_priv *ap;
\r
1354 struct resource *res;
\r
1355 unsigned int mem_base, mem_size, irq;
\r
1357 struct clk *sys_clk;
\r
1358 struct rk29_vmac_platform_data *pdata = pdev->dev.platform_data;
\r
1360 dev = alloc_etherdev(sizeof(*ap));
\r
1362 dev_err(&pdev->dev, "etherdev alloc failed, aborting.\n");
\r
1366 ap = netdev_priv(dev);
\r
1369 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
\r
1371 dev_err(&pdev->dev, "no mmio resource defined\n");
\r
1374 mem_base = res->start;
\r
1375 mem_size = resource_size(res);
\r
1376 irq = platform_get_irq(pdev, 0);
\r
1379 if (!request_mem_region(mem_base, mem_size, VMAC_NAME)) {
\r
1380 dev_err(&pdev->dev, "no memory region available\n");
\r
1385 ap->regs = ioremap(mem_base, mem_size);
\r
1387 dev_err(&pdev->dev, "failed to map registers, aborting.\n");
\r
1388 goto err_out_release_mem;
\r
1391 /* no checksum support, hence no scatter/gather */
\r
1392 dev->features |= NETIF_F_HIGHDMA;
\r
1394 spin_lock_init(&ap->lock);
\r
1396 SET_NETDEV_DEV(dev, &pdev->dev);
\r
1400 /* init rx timeout (used for oom) */
\r
1401 init_timer(&ap->rx_timeout);
\r
1402 ap->rx_timeout.function = vmac_refill_rx_timer;
\r
1403 ap->rx_timeout.data = (unsigned long)dev;
\r
1405 netif_napi_add(dev, &ap->napi, vmac_poll, 2);
\r
1406 dev->netdev_ops = &vmac_netdev_ops;
\r
1407 dev->ethtool_ops = &vmac_ethtool_ops;
\r
1410 dev->flags |= IFF_MULTICAST;////////////////////
\r
1412 dev->base_addr = (unsigned long)ap->regs;
\r
1413 ap->mem_base = mem_base;
\r
1415 /* prevent buffer chaining, favor speed over space */
\r
1416 ap->rx_skb_size = ETH_FRAME_LEN + VMAC_BUFFER_PAD;
\r
1418 /* private struct functional */
\r
1420 /* mac address intialize, set vmac_open */
\r
1421 read_mac_reg(dev, dev->dev_addr);
\r
1423 if (!is_valid_ether_addr(dev->dev_addr))
\r
1424 random_ether_addr(dev->dev_addr);
\r
1426 err = register_netdev(dev);
\r
1428 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
\r
1429 goto err_out_iounmap;
\r
1432 dev_info(&pdev->dev, "ARC VMAC at 0x%08x irq %d %pM\n", mem_base,
\r
1433 dev->irq, dev->dev_addr);
\r
1434 platform_set_drvdata(pdev, dev);
\r
1436 //config rk29 vmac as rmii, 100MHz
\r
1437 if (pdata && pdata->vmac_register_set)
\r
1438 pdata->vmac_register_set();
\r
1440 //set rmii ref clock 50MHz
\r
1441 sys_clk = clk_get(NULL, "mac_ref_div");////////
\r
1442 clk_set_rate(sys_clk,50000000);
\r
1444 sys_clk = clk_get(NULL, "mac_ref");////////
\r
1445 clk_set_rate(sys_clk,50000000);
\r
1448 if (pdata && pdata->rmii_io_init)
\r
1449 pdata->rmii_io_init();
\r
1454 iounmap(ap->regs);
\r
1455 err_out_release_mem:
\r
1456 release_mem_region(mem_base, mem_size);
\r
1462 static int __devexit vmac_remove(struct platform_device *pdev)
\r
1464 struct net_device *dev;
\r
1465 struct vmac_priv *ap;
\r
1466 struct resource *res;
\r
1468 dev = platform_get_drvdata(pdev);
\r
1470 dev_err(&pdev->dev, "%s no valid dev found\n", __func__);
\r
1474 ap = netdev_priv(dev);
\r
1477 unregister_netdev(dev);
\r
1478 iounmap(ap->regs);
\r
1480 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
\r
1481 release_mem_region(res->start, resource_size(res));
\r
1483 platform_set_drvdata(pdev, NULL);
\r
1488 static struct platform_driver rk29_vmac_driver = {
\r
1489 .probe = vmac_probe,
\r
1490 .remove = __devexit_p(vmac_remove),
\r
1492 .name = "rk29 vmac",
\r
1496 static int __init vmac_init(void)
\r
1498 return platform_driver_register(&rk29_vmac_driver);
\r
1501 static void __exit vmac_exit(void)
\r
1503 platform_driver_unregister(&rk29_vmac_driver);
\r
1506 module_init(vmac_init);
\r
1507 module_exit(vmac_exit);
\r
1509 MODULE_LICENSE("GPL");
\r
1510 MODULE_DESCRIPTION("RK29 VMAC Ethernet driver");
\r
1511 MODULE_AUTHOR("amit.bhor@celunite.com, sameer.dhavale@celunite.com, andreas.fenkart@streamunlimited.com");
\r