/*
 * linux/arch/arc/drivers/arcvmac.c
 *
 * Copyright (C) 2003-2006 Codito Technologies, for linux-2.4 port
 * Copyright (C) 2006-2007 Celunite Inc, for linux-2.6 port
 * Copyright (C) 2007-2008 Sagem Communications, Fehmi HAFSI
 * Copyright (C) 2009 Sagem Communications, Andreas Fenkart
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * external PHY support based on dnet.c
 * ring management based on bcm63xx_enet.c
 *
 * Authors: amit.bhor@celunite.com, sameer.dhavale@celunite.com
 */
32 #include <linux/clk.h>
\r
33 #include <linux/crc32.h>
\r
34 #include <linux/delay.h>
\r
35 #include <linux/dma-mapping.h>
\r
36 #include <linux/etherdevice.h>
\r
37 #include <linux/init.h>
\r
38 #include <linux/io.h>
\r
39 #include <linux/kernel.h>
\r
40 #include <linux/module.h>
\r
41 #include <linux/moduleparam.h>
\r
42 #include <linux/netdevice.h>
\r
43 #include <linux/phy.h>
\r
44 #include <linux/platform_device.h>
\r
45 #include <linux/slab.h>
\r
46 #include <linux/types.h>
\r
47 #include <linux/wakelock.h>
\r
49 #include <mach/iomux.h>
\r
50 #include <mach/gpio.h>
\r
51 #include <mach/cru.h>
\r
52 #include <mach/board.h>
\r
54 #include "rk29_vmac.h"
\r
56 static struct wake_lock idlelock; /* add by lyx @ 20110302 */
\r
58 /* Register access macros */
\r
59 #define vmac_writel(port, value, reg) \
\r
60 writel((value), (port)->regs + reg##_OFFSET)
\r
61 #define vmac_readl(port, reg) readl((port)->regs + reg##_OFFSET)
\r
63 static unsigned char *read_mac_reg(struct net_device *dev,
\r
64 unsigned char hwaddr[ETH_ALEN])
\r
66 struct vmac_priv *ap = netdev_priv(dev);
\r
67 unsigned mac_lo, mac_hi;
\r
70 mac_lo = vmac_readl(ap, ADDRL);
\r
71 mac_hi = vmac_readl(ap, ADDRH);
\r
73 hwaddr[0] = (mac_lo >> 0) & 0xff;
\r
74 hwaddr[1] = (mac_lo >> 8) & 0xff;
\r
75 hwaddr[2] = (mac_lo >> 16) & 0xff;
\r
76 hwaddr[3] = (mac_lo >> 24) & 0xff;
\r
77 hwaddr[4] = (mac_hi >> 0) & 0xff;
\r
78 hwaddr[5] = (mac_hi >> 8) & 0xff;
\r
82 static void write_mac_reg(struct net_device *dev, unsigned char* hwaddr)
\r
84 struct vmac_priv *ap = netdev_priv(dev);
\r
85 unsigned mac_lo, mac_hi;
\r
87 mac_lo = hwaddr[3] << 24 | hwaddr[2] << 16 | hwaddr[1] << 8 | hwaddr[0];
\r
88 mac_hi = hwaddr[5] << 8 | hwaddr[4];
\r
90 vmac_writel(ap, mac_lo, ADDRL);
\r
91 vmac_writel(ap, mac_hi, ADDRH);
\r
94 static void vmac_mdio_xmit(struct vmac_priv *ap, unsigned val)
\r
96 init_completion(&ap->mdio_complete);
\r
97 vmac_writel(ap, val, MDIO_DATA);
\r
98 wait_for_completion(&ap->mdio_complete);
\r
101 static int vmac_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
\r
103 struct vmac_priv *vmac = bus->priv;
\r
105 /* only 5 bits allowed for phy-addr and reg_offset */
\r
106 WARN_ON(phy_id & ~0x1f || phy_reg & ~0x1f);
\r
108 val = MDIO_BASE | MDIO_OP_READ;
\r
109 val |= phy_id << 23 | phy_reg << 18;
\r
110 vmac_mdio_xmit(vmac, val);
\r
112 val = vmac_readl(vmac, MDIO_DATA);
\r
113 return val & MDIO_DATA_MASK;
\r
116 static int vmac_mdio_write(struct mii_bus *bus, int phy_id, int phy_reg,
\r
119 struct vmac_priv *vmac = bus->priv;
\r
121 /* only 5 bits allowed for phy-addr and reg_offset */
\r
122 WARN_ON(phy_id & ~0x1f || phy_reg & ~0x1f);
\r
124 val = MDIO_BASE | MDIO_OP_WRITE;
\r
125 val |= phy_id << 23 | phy_reg << 18;
\r
126 val |= (value & MDIO_DATA_MASK);
\r
127 vmac_mdio_xmit(vmac, val);
\r
131 static void vmac_handle_link_change(struct net_device *dev)
\r
133 struct vmac_priv *ap = netdev_priv(dev);
\r
134 struct phy_device *phydev = ap->phy_dev;
\r
135 unsigned long flags;
\r
136 int report_change = 0;
\r
138 spin_lock_irqsave(&ap->lock, flags);
\r
140 if (phydev->duplex != ap->duplex) {
\r
143 tmp = vmac_readl(ap, CONTROL);
\r
145 if (phydev->duplex)
\r
150 vmac_writel(ap, tmp, CONTROL);
\r
152 ap->duplex = phydev->duplex;
\r
156 if (phydev->speed != ap->speed) {
\r
157 ap->speed = phydev->speed;
\r
161 if (phydev->link != ap->link) {
\r
162 ap->link = phydev->link;
\r
166 spin_unlock_irqrestore(&ap->lock, flags);
\r
169 phy_print_status(ap->phy_dev);
\r
172 static int __devinit vmac_mii_probe(struct net_device *dev)
\r
174 struct vmac_priv *ap = netdev_priv(dev);
\r
175 struct phy_device *phydev = NULL;
\r
176 struct clk *sys_clk;
\r
177 unsigned long clock_rate;
\r
180 /* find the first phy */
\r
181 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
\r
182 if (ap->mii_bus->phy_map[phy_addr]) {
\r
183 phydev = ap->mii_bus->phy_map[phy_addr];
\r
189 dev_err(&dev->dev, "no PHY found\n");
\r
193 /* add pin_irq, if avail */
\r
194 phydev = phy_connect(dev, dev_name(&phydev->dev),
\r
195 &vmac_handle_link_change, 0,
\r
196 //PHY_INTERFACE_MODE_MII);
\r
197 PHY_INTERFACE_MODE_RMII);//????????
\r
198 if (IS_ERR(phydev)) {
\r
199 err = PTR_ERR(phydev);
\r
200 dev_err(&dev->dev, "could not attach to PHY %d\n", err);
\r
204 phydev->supported &= PHY_BASIC_FEATURES;
\r
205 phydev->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause;
\r
208 sys_clk = clk_get(NULL, "mac_ref");////////
\r
209 if (IS_ERR(sys_clk)) {
\r
210 err = PTR_ERR(sys_clk);
\r
211 goto err_disconnect;
\r
214 clk_set_rate(sys_clk,50000000);
\r
215 clock_rate = clk_get_rate(sys_clk);
\r
218 printk("%s::%d --mac clock = %d\n",__func__, __LINE__, clock_rate);
\r
219 dev_dbg(&ap->pdev->dev, "clk_get: dev_name : %s %lu\n",
\r
220 dev_name(&ap->pdev->dev),
\r
223 if (clock_rate < 25000000)
\r
224 phydev->supported &= ~(SUPPORTED_100baseT_Half |
\r
225 SUPPORTED_100baseT_Full);
\r
228 phydev->advertising = phydev->supported;
\r
233 ap->phy_dev = phydev;
\r
238 phy_disconnect(phydev);
\r
243 static int __devinit vmac_mii_init(struct vmac_priv *ap)
\r
247 ap->mii_bus = mdiobus_alloc();
\r
249 if (ap->mii_bus == NULL)
\r
252 ap->mii_bus->name = "vmac_mii_bus";
\r
253 ap->mii_bus->read = &vmac_mdio_read;
\r
254 ap->mii_bus->write = &vmac_mdio_write;
\r
256 snprintf(ap->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);
\r
258 ap->mii_bus->priv = ap;
\r
261 ap->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
\r
262 if (!ap->mii_bus->irq)
\r
265 for (i = 0; i < PHY_MAX_ADDR; i++)
\r
266 ap->mii_bus->irq[i] = PHY_POLL;
\r
269 /* FIXME: what is it used for? */
\r
270 platform_set_drvdata(ap->dev, ap->mii_bus);
\r
273 err = mdiobus_register(ap->mii_bus);
\r
275 goto err_out_free_mdio_irq;
\r
277 err = vmac_mii_probe(ap->dev);
\r
279 goto err_out_unregister_bus;
\r
283 err_out_unregister_bus:
\r
284 mdiobus_unregister(ap->mii_bus);
\r
285 err_out_free_mdio_irq:
\r
286 kfree(ap->mii_bus->irq);
\r
288 mdiobus_free(ap->mii_bus);
\r
292 static void vmac_mii_exit(struct net_device *dev)
\r
294 struct vmac_priv *ap = netdev_priv(dev);
\r
297 phy_disconnect(ap->phy_dev);
\r
299 mdiobus_unregister(ap->mii_bus);
\r
300 kfree(ap->mii_bus->irq);
\r
301 mdiobus_free(ap->mii_bus);
\r
304 static int vmacether_get_settings(struct net_device *dev,
\r
305 struct ethtool_cmd *cmd)
\r
307 struct vmac_priv *ap = netdev_priv(dev);
\r
308 struct phy_device *phydev = ap->phy_dev;
\r
313 return phy_ethtool_gset(phydev, cmd);
\r
316 static int vmacether_set_settings(struct net_device *dev,
\r
317 struct ethtool_cmd *cmd)
\r
319 struct vmac_priv *ap = netdev_priv(dev);
\r
320 struct phy_device *phydev = ap->phy_dev;
\r
325 return phy_ethtool_sset(phydev, cmd);
\r
328 static int vmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
\r
330 struct vmac_priv *ap = netdev_priv(dev);
\r
331 struct phy_device *phydev = ap->phy_dev;
\r
333 if (!netif_running(dev))
\r
339 return phy_mii_ioctl(phydev, rq, cmd);
\r
342 static void vmacether_get_drvinfo(struct net_device *dev,
\r
343 struct ethtool_drvinfo *info)
\r
345 struct vmac_priv *ap = netdev_priv(dev);
\r
347 strlcpy(info->driver, VMAC_NAME, sizeof(info->driver));
\r
348 strlcpy(info->version, VMAC_VERSION, sizeof(info->version));
\r
349 snprintf(info->bus_info, sizeof(info->bus_info),
\r
350 "platform 0x%x", ap->mem_base);
\r
353 static int update_error_counters(struct net_device *dev, int status)
\r
355 struct vmac_priv *ap = netdev_priv(dev);
\r
356 dev_dbg(&ap->pdev->dev, "rx error counter overrun. status = 0x%x\n",
\r
359 /* programming error */
\r
360 WARN_ON(status & TXCH_MASK);
\r
361 WARN_ON(!(status & (MSER_MASK | RXCR_MASK | RXFR_MASK | RXFL_MASK)));
\r
363 if (status & MSER_MASK)
\r
364 ap->stats.rx_over_errors += 256; /* ran out of BD */
\r
365 if (status & RXCR_MASK)
\r
366 ap->stats.rx_crc_errors += 256;
\r
367 if (status & RXFR_MASK)
\r
368 ap->stats.rx_frame_errors += 256;
\r
369 if (status & RXFL_MASK)
\r
370 ap->stats.rx_fifo_errors += 256;
\r
375 static void update_tx_errors(struct net_device *dev, int status)
\r
377 struct vmac_priv *ap = netdev_priv(dev);
\r
380 ap->stats.tx_fifo_errors++;
\r
385 /* half duplex flags */
\r
387 ap->stats.tx_window_errors++;
\r
388 if (status & RETRY_CT)
\r
389 ap->stats.collisions += (status & RETRY_CT) >> 24;
\r
390 if (status & DROP) /* too many retries */
\r
391 ap->stats.tx_aborted_errors++;
\r
392 if (status & DEFER)
\r
393 dev_vdbg(&ap->pdev->dev, "\"defer to traffic\"\n");
\r
394 if (status & CARLOSS)
\r
395 ap->stats.tx_carrier_errors++;
\r
398 static int vmac_rx_reclaim_force(struct net_device *dev)
\r
400 struct vmac_priv *ap = netdev_priv(dev);
\r
405 dev_dbg(&ap->pdev->dev, "%s need to release %d rx sk_buff\n",
\r
406 __func__, fifo_used(&ap->rx_ring));
\r
408 while (!fifo_empty(&ap->rx_ring) && ct++ < ap->rx_ring.size) {
\r
409 struct vmac_buffer_desc *desc;
\r
410 struct sk_buff *skb;
\r
413 desc_idx = ap->rx_ring.tail;
\r
414 desc = &ap->rxbd[desc_idx];
\r
415 fifo_inc_tail(&ap->rx_ring);
\r
417 if (!ap->rx_skbuff[desc_idx]) {
\r
418 dev_err(&ap->pdev->dev, "non-populated rx_skbuff found %d\n",
\r
423 skb = ap->rx_skbuff[desc_idx];
\r
424 ap->rx_skbuff[desc_idx] = NULL;
\r
426 dma_unmap_single(&ap->pdev->dev, desc->data, skb->len,
\r
429 dev_kfree_skb(skb);
\r
432 if (!fifo_empty(&ap->rx_ring)) {
\r
433 dev_err(&ap->pdev->dev, "failed to reclaim %d rx sk_buff\n",
\r
434 fifo_used(&ap->rx_ring));
\r
440 static int vmac_rx_refill(struct net_device *dev)
\r
442 struct vmac_priv *ap = netdev_priv(dev);
\r
444 WARN_ON(fifo_full(&ap->rx_ring));
\r
446 while (!fifo_full(&ap->rx_ring)) {
\r
447 struct vmac_buffer_desc *desc;
\r
448 struct sk_buff *skb;
\r
452 desc_idx = ap->rx_ring.head;
\r
453 desc = &ap->rxbd[desc_idx];
\r
455 /* make sure we read the actual descriptor status */
\r
458 if (ap->rx_skbuff[desc_idx]) {
\r
459 /* dropped packet / buffer chaining */
\r
460 fifo_inc_head(&ap->rx_ring);
\r
462 /* return to DMA */
\r
464 desc->info = OWN_MASK | ap->rx_skb_size;
\r
468 skb = netdev_alloc_skb(dev, ap->rx_skb_size + 2);
\r
470 dev_info(&ap->pdev->dev, "failed to allocate rx_skb, skb's left %d\n",
\r
471 fifo_used(&ap->rx_ring));
\r
475 /* IP header Alignment (14 byte Ethernet header) */
\r
476 skb_reserve(skb, 2);
\r
477 WARN_ON(skb->len != 0); /* nothing received yet */
\r
479 ap->rx_skbuff[desc_idx] = skb;
\r
481 p = dma_map_single(&ap->pdev->dev, skb->data, ap->rx_skb_size,
\r
487 desc->info = OWN_MASK | ap->rx_skb_size;
\r
489 fifo_inc_head(&ap->rx_ring);
\r
492 /* If rx ring is still empty, set a timer to try allocating
\r
493 * again at a later time. */
\r
494 if (fifo_empty(&ap->rx_ring) && netif_running(dev)) {
\r
495 dev_warn(&ap->pdev->dev, "unable to refill rx ring\n");
\r
496 ap->rx_timeout.expires = jiffies + HZ;
\r
497 add_timer(&ap->rx_timeout);
\r
504 * timer callback to defer refill rx queue in case we're OOM
\r
506 static void vmac_refill_rx_timer(unsigned long data)
\r
508 struct net_device *dev;
\r
509 struct vmac_priv *ap;
\r
511 dev = (struct net_device *)data;
\r
512 ap = netdev_priv(dev);
\r
514 spin_lock(&ap->rx_lock);
\r
515 vmac_rx_refill(dev);
\r
516 spin_unlock(&ap->rx_lock);
\r
519 /* merge buffer chaining */
\r
520 struct sk_buff *vmac_merge_rx_buffers(struct net_device *dev,
\r
521 struct vmac_buffer_desc *after,
\r
522 int pkt_len) /* data */
\r
524 struct vmac_priv *ap = netdev_priv(dev);
\r
525 struct sk_buff *merge_skb, *cur_skb;
\r
526 struct dma_fifo *rx_ring;
\r
527 struct vmac_buffer_desc *desc;
\r
529 rx_ring = &ap->rx_ring;
\r
530 desc = &ap->rxbd[rx_ring->tail];
\r
532 WARN_ON(desc == after);
\r
537 /* IP header Alignment (14 byte Ethernet header) */
\r
538 merge_skb = netdev_alloc_skb(dev, pkt_len + 2);
\r
540 dev_err(&ap->pdev->dev, "failed to allocate merged rx_skb, rx skb's left %d\n",
\r
541 fifo_used(rx_ring));
\r
546 skb_reserve(merge_skb, 2);
\r
548 while (desc != after && pkt_len) {
\r
549 struct vmac_buffer_desc *desc;
\r
550 int buf_len, valid;
\r
552 /* desc needs wrapping */
\r
553 desc = &ap->rxbd[rx_ring->tail];
\r
554 cur_skb = ap->rx_skbuff[rx_ring->tail];
\r
557 dma_unmap_single(&ap->pdev->dev, desc->data, ap->rx_skb_size,
\r
560 /* do not copy FCS */
\r
561 buf_len = desc->info & LEN_MASK;
\r
562 valid = min(pkt_len, buf_len);
\r
565 memcpy(skb_put(merge_skb, valid), cur_skb->data, valid);
\r
567 fifo_inc_tail(rx_ring);
\r
570 /* merging_pressure++ */
\r
572 if (unlikely(pkt_len != 0))
\r
573 dev_err(&ap->pdev->dev, "buffer chaining bytes missing %d\n",
\r
576 WARN_ON(desc != after);
\r
581 int vmac_rx_receive(struct net_device *dev, int budget)
\r
583 struct vmac_priv *ap = netdev_priv(dev);
\r
584 struct vmac_buffer_desc *first;
\r
585 int processed, pkt_len, pkt_err;
\r
586 struct dma_fifo lookahead;
\r
591 pkt_err = pkt_len = 0;
\r
593 /* look ahead, till packet complete */
\r
594 lookahead = ap->rx_ring;
\r
597 struct vmac_buffer_desc *desc; /* cur_ */
\r
598 int desc_idx; /* cur_ */
\r
599 struct sk_buff *skb; /* pkt_ */
\r
601 desc_idx = lookahead.tail;
\r
602 desc = &ap->rxbd[desc_idx];
\r
604 /* make sure we read the actual descriptor status */
\r
607 /* break if dma ownership belongs to hw */
\r
608 if (desc->info & OWN_MASK) {
\r
609 ap->mac_rxring_head = vmac_readl(ap, MAC_RXRING_HEAD);
\r
613 if (desc->info & FRST_MASK) {
\r
617 /* don't free current */
\r
618 ap->rx_ring.tail = lookahead.tail;
\r
622 fifo_inc_tail(&lookahead);
\r
626 pkt_len += desc->info & LEN_MASK;
\r
627 pkt_err |= (desc->info & BUFF);
\r
629 if (!(desc->info & LAST_MASK))
\r
632 /* received complete packet */
\r
634 if (unlikely(pkt_err || !first)) {
\r
635 /* recycle buffers */
\r
636 ap->rx_ring.tail = lookahead.tail;
\r
640 WARN_ON(!(first->info & FRST_MASK) ||
\r
641 !(desc->info & LAST_MASK));
\r
644 /* -- valid packet -- */
\r
646 if (first != desc) {
\r
647 skb = vmac_merge_rx_buffers(dev, desc, pkt_len);
\r
651 ap->rx_ring.tail = lookahead.tail;
\r
652 ap->rx_merge_error++;
\r
656 dma_unmap_single(&ap->pdev->dev, desc->data,
\r
657 ap->rx_skb_size, DMA_FROM_DEVICE);
\r
659 skb = ap->rx_skbuff[desc_idx];
\r
660 ap->rx_skbuff[desc_idx] = NULL;
\r
661 /* desc->data != skb->data => desc->data DMA mapped */
\r
664 skb_put(skb, pkt_len - 4);
\r
668 ap->rx_ring.tail = lookahead.tail;
\r
670 WARN_ON(skb->len != pkt_len - 4);
\r
673 skb->protocol = eth_type_trans(skb, dev);
\r
674 ap->stats.rx_packets++;
\r
675 ap->stats.rx_bytes += skb->len;
\r
676 dev->last_rx = jiffies;
\r
679 } while (!fifo_empty(&lookahead) && (processed < budget));
\r
681 dev_vdbg(&ap->pdev->dev, "processed pkt %d, remaining rx buff %d\n",
\r
683 fifo_used(&ap->rx_ring));
\r
685 if (processed || fifo_empty(&ap->rx_ring))
\r
686 vmac_rx_refill(dev);
\r
691 static void vmac_toggle_irqmask(struct net_device *dev, int enable, int mask)
\r
693 struct vmac_priv *ap = netdev_priv(dev);
\r
696 tmp = vmac_readl(ap, ENABLE);
\r
701 vmac_writel(ap, tmp, ENABLE);
\r
704 static void vmac_toggle_txint(struct net_device *dev, int enable)
\r
706 struct vmac_priv *ap = netdev_priv(dev);
\r
707 unsigned long flags;
\r
709 spin_lock_irqsave(&ap->lock, flags);
\r
710 vmac_toggle_irqmask(dev, enable, TXINT_MASK);
\r
711 spin_unlock_irqrestore(&ap->lock, flags);
\r
714 static void vmac_toggle_rxint(struct net_device *dev, int enable)
\r
716 vmac_toggle_irqmask(dev, enable, RXINT_MASK);
\r
719 static int vmac_poll(struct napi_struct *napi, int budget)
\r
721 struct vmac_priv *ap;
\r
722 struct net_device *dev;
\r
724 unsigned long flags;
\r
726 ap = container_of(napi, struct vmac_priv, napi);
\r
729 /* ack interrupt */
\r
730 vmac_writel(ap, RXINT_MASK, STAT);
\r
732 spin_lock(&ap->rx_lock);
\r
733 rx_work_done = vmac_rx_receive(dev, budget);
\r
734 spin_unlock(&ap->rx_lock);
\r
736 #ifdef VERBOSE_DEBUG
\r
737 if (printk_ratelimit()) {
\r
738 dev_vdbg(&ap->pdev->dev, "poll budget %d receive rx_work_done %d\n",
\r
744 if (rx_work_done >= budget) {
\r
745 /* rx queue is not yet empty/clean */
\r
746 return rx_work_done;
\r
749 /* no more packet in rx/tx queue, remove device from poll
\r
751 spin_lock_irqsave(&ap->lock, flags);
\r
752 napi_complete(napi);
\r
753 vmac_toggle_rxint(dev, 1);
\r
754 spin_unlock_irqrestore(&ap->lock, flags);
\r
756 return rx_work_done;
\r
759 static int vmac_tx_reclaim(struct net_device *dev, int force);
\r
761 static irqreturn_t vmac_intr(int irq, void *dev_instance)
\r
763 struct net_device *dev = dev_instance;
\r
764 struct vmac_priv *ap = netdev_priv(dev);
\r
765 unsigned int status;
\r
767 spin_lock(&ap->lock);
\r
769 status = vmac_readl(ap, STAT);
\r
770 vmac_writel(ap, status, STAT);
\r
773 if (unlikely(ap->shutdown))
\r
774 dev_err(&ap->pdev->dev, "ISR during close\n");
\r
776 if (unlikely(!status & (RXINT_MASK|MDIO_MASK|ERR_MASK)))
\r
777 dev_err(&ap->pdev->dev, "No source of IRQ found\n");
\r
780 if ((status & RXINT_MASK) &&
\r
781 (ap->mac_rxring_head !=
\r
782 vmac_readl(ap, MAC_RXRING_HEAD))) {
\r
783 vmac_toggle_rxint(dev, 0);
\r
784 napi_schedule(&ap->napi);
\r
787 if (unlikely(netif_queue_stopped(dev) && (status & TXINT_MASK)))
\r
788 vmac_tx_reclaim(dev, 0);
\r
790 if (status & MDIO_MASK)
\r
791 complete(&ap->mdio_complete);
\r
793 if (unlikely(status & ERR_MASK))
\r
794 update_error_counters(dev, status);
\r
796 spin_unlock(&ap->lock);
\r
798 return IRQ_HANDLED;
\r
801 static int vmac_tx_reclaim(struct net_device *dev, int force)
\r
803 struct vmac_priv *ap = netdev_priv(dev);
\r
806 /* buffer chaining not used, see vmac_start_xmit */
\r
808 while (!fifo_empty(&ap->tx_ring)) {
\r
809 struct vmac_buffer_desc *desc;
\r
810 struct sk_buff *skb;
\r
813 desc_idx = ap->tx_ring.tail;
\r
814 desc = &ap->txbd[desc_idx];
\r
816 /* ensure other field of the descriptor were not read
\r
817 * before we checked ownership */
\r
820 if ((desc->info & OWN_MASK) && !force)
\r
823 if (desc->info & ERR_MSK_TX) {
\r
824 update_tx_errors(dev, desc->info);
\r
825 /* recycle packet, let upper level deal with it */
\r
828 skb = ap->tx_skbuff[desc_idx];
\r
829 ap->tx_skbuff[desc_idx] = NULL;
\r
832 dma_unmap_single(&ap->pdev->dev, desc->data, skb->len,
\r
835 dev_kfree_skb_any(skb);
\r
838 fifo_inc_tail(&ap->tx_ring);
\r
841 if (netif_queue_stopped(dev) && released) {
\r
842 netif_wake_queue(dev);
\r
843 vmac_toggle_txint(dev, 0);
\r
846 if (unlikely(force && !fifo_empty(&ap->tx_ring))) {
\r
847 dev_err(&ap->pdev->dev, "failed to reclaim %d tx sk_buff\n",
\r
848 fifo_used(&ap->tx_ring));
\r
854 int vmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
\r
856 struct vmac_priv *ap = netdev_priv(dev);
\r
857 struct vmac_buffer_desc *desc;
\r
860 /* running under xmit lock */
\r
862 /* no scatter/gatter see features below */
\r
863 WARN_ON(skb_shinfo(skb)->nr_frags != 0);
\r
864 WARN_ON(skb->len > MAX_TX_BUFFER_LEN);
\r
866 if (unlikely(fifo_full(&ap->tx_ring))) {
\r
867 netif_stop_queue(dev);
\r
868 vmac_toggle_txint(dev, 1);
\r
869 dev_err(&ap->pdev->dev, "xmit called with no tx desc available\n");
\r
870 return NETDEV_TX_BUSY;
\r
873 if (unlikely(skb->len < ETH_ZLEN)) {
\r
874 struct sk_buff *short_skb;
\r
875 short_skb = netdev_alloc_skb(dev, ETH_ZLEN);
\r
877 return NETDEV_TX_LOCKED;
\r
879 memset(short_skb->data, 0, ETH_ZLEN);
\r
880 memcpy(skb_put(short_skb, ETH_ZLEN), skb->data, skb->len);
\r
881 dev_kfree_skb(skb);
\r
885 /* fill descriptor */
\r
886 ap->tx_skbuff[ap->tx_ring.head] = skb;
\r
888 desc = &ap->txbd[ap->tx_ring.head];
\r
889 desc->data = dma_map_single(&ap->pdev->dev, skb->data, skb->len,
\r
892 /* dma might already be polling */
\r
894 desc->info = OWN_MASK | FRST_MASK | LAST_MASK | skb->len;
\r
898 tmp = vmac_readl(ap, STAT);
\r
899 vmac_writel(ap, tmp | TXPL_MASK, STAT);
\r
901 ap->stats.tx_packets++;
\r
902 ap->stats.tx_bytes += skb->len;
\r
903 dev->trans_start = jiffies;
\r
904 fifo_inc_head(&ap->tx_ring);
\r
906 /* vmac_tx_reclaim independent of vmac_tx_timeout */
\r
907 if (fifo_used(&ap->tx_ring) > 8)
\r
908 vmac_tx_reclaim(dev, 0);
\r
910 /* stop queue if no more desc available */
\r
911 if (fifo_full(&ap->tx_ring)) {
\r
912 netif_stop_queue(dev);
\r
913 vmac_toggle_txint(dev, 1);
\r
916 return NETDEV_TX_OK;
\r
919 static int alloc_buffers(struct net_device *dev)
\r
921 struct vmac_priv *ap = netdev_priv(dev);
\r
925 fifo_init(&ap->rx_ring, RX_BDT_LEN);
\r
926 fifo_init(&ap->tx_ring, TX_BDT_LEN);
\r
928 /* initialize skb list */
\r
929 memset(ap->rx_skbuff, 0, sizeof(ap->rx_skbuff));
\r
930 memset(ap->tx_skbuff, 0, sizeof(ap->tx_skbuff));
\r
932 /* allocate DMA received descriptors */
\r
933 size = sizeof(*ap->rxbd) * ap->rx_ring.size;
\r
934 ap->rxbd = dma_alloc_coherent(&ap->pdev->dev, size,
\r
937 if (ap->rxbd == NULL)
\r
940 /* allocate DMA transmit descriptors */
\r
941 size = sizeof(*ap->txbd) * ap->tx_ring.size;
\r
942 ap->txbd = dma_alloc_coherent(&ap->pdev->dev, size,
\r
945 if (ap->txbd == NULL)
\r
946 goto err_free_rxbd;
\r
948 /* ensure 8-byte aligned */
\r
949 WARN_ON(((int)ap->txbd & 0x7) || ((int)ap->rxbd & 0x7));
\r
951 memset(ap->txbd, 0, sizeof(*ap->txbd) * ap->tx_ring.size);
\r
952 memset(ap->rxbd, 0, sizeof(*ap->rxbd) * ap->rx_ring.size);
\r
954 /* allocate rx skb */
\r
955 err = vmac_rx_refill(dev);
\r
957 goto err_free_txbd;
\r
962 dma_free_coherent(&ap->pdev->dev, sizeof(*ap->txbd) * ap->tx_ring.size,
\r
963 ap->txbd, ap->txbd_dma);
\r
965 dma_free_coherent(&ap->pdev->dev, sizeof(*ap->rxbd) * ap->rx_ring.size,
\r
966 ap->rxbd, ap->rxbd_dma);
\r
971 static int free_buffers(struct net_device *dev)
\r
973 struct vmac_priv *ap = netdev_priv(dev);
\r
976 vmac_tx_reclaim(dev, 1);
\r
977 vmac_rx_reclaim_force(dev);
\r
979 /* free DMA ring */
\r
980 dma_free_coherent(&ap->pdev->dev, sizeof(ap->txbd) * ap->tx_ring.size,
\r
981 ap->txbd, ap->txbd_dma);
\r
982 dma_free_coherent(&ap->pdev->dev, sizeof(ap->rxbd) * ap->rx_ring.size,
\r
983 ap->rxbd, ap->rxbd_dma);
\r
988 static int vmac_hw_init(struct net_device *dev)
\r
990 struct vmac_priv *priv = netdev_priv(dev);
\r
992 /* clear IRQ mask */
\r
993 vmac_writel(priv, 0, ENABLE);
\r
995 /* clear pending IRQ */
\r
996 vmac_writel(priv, 0xffffffff, STAT);
\r
998 /* Initialize logical address filter */
\r
999 vmac_writel(priv, 0x0, LAFL);
\r
1000 vmac_writel(priv, 0x0, LAFH);
\r
1006 static int vmac_register_print(struct net_device *dev)
\r
1008 struct vmac_priv *ap = netdev_priv(dev);
\r
1010 printk("func::%s vmac register %s value = 0x%x\n", __func__, "ID", vmac_readl(ap, ID));
\r
1011 printk("func::%s vmac register %s value = 0x%x\n", __func__, "STAT", vmac_readl(ap, STAT));
\r
1012 printk("func::%s vmac register %s value = 0x%x\n", __func__, "ENABLE", vmac_readl(ap, ENABLE));
\r
1013 printk("func::%s vmac register %s value = 0x%x\n", __func__, "CONTROL", vmac_readl(ap, CONTROL));
\r
1014 printk("func::%s vmac register %s value = 0x%x\n", __func__, "ADDRL", vmac_readl(ap, ADDRL));
\r
1015 printk("func::%s vmac register %s value = 0x%x\n", __func__, "ADDRH", vmac_readl(ap, ADDRH));
\r
1021 int vmac_open(struct net_device *dev)
\r
1023 struct vmac_priv *ap = netdev_priv(dev);
\r
1024 struct phy_device *phydev;
\r
1025 unsigned int temp;
\r
1027 struct clk *mac_clk = NULL;
\r
1028 struct clk *mac_parent = NULL;
\r
1029 struct clk *arm_clk = NULL;
\r
1030 struct rk29_vmac_platform_data *pdata = ap->pdev->dev.platform_data;
\r
1037 //set rmii ref clock 50MHz
\r
1038 mac_clk = clk_get(NULL, "mac_ref_div");
\r
1039 arm_clk = clk_get(NULL, "arm_pll");
\r
1040 mac_parent = clk_get_parent(mac_clk);
\r
1041 if (arm_clk && mac_parent && (arm_clk == mac_parent))
\r
1042 wake_lock(&idlelock);
\r
1044 clk_set_rate(mac_clk, 50000000);
\r
1045 clk_enable(clk_get(NULL,"mii_rx"));
\r
1046 clk_enable(clk_get(NULL,"mii_tx"));
\r
1047 clk_enable(clk_get(NULL,"hclk_mac"));
\r
1048 clk_enable(clk_get(NULL,"mac_ref"));
\r
1051 if (pdata && pdata->rmii_power_control)
\r
1052 pdata->rmii_power_control(1);
\r
1056 vmac_hw_init(dev);
\r
1058 /* mac address changed? */
\r
1059 write_mac_reg(dev, dev->dev_addr);
\r
1061 err = alloc_buffers(dev);
\r
1065 err = request_irq(dev->irq, &vmac_intr, 0, dev->name, dev);
\r
1067 dev_err(&ap->pdev->dev, "Unable to request IRQ %d (error %d)\n",
\r
1069 goto err_free_buffers;
\r
1072 /* install DMA ring pointers */
\r
1073 vmac_writel(ap, ap->rxbd_dma, RXRINGPTR);
\r
1074 vmac_writel(ap, ap->txbd_dma, TXRINGPTR);
\r
1076 /* set poll rate to 1 ms */
\r
1077 vmac_writel(ap, POLLRATE_TIME, POLLRATE);
\r
1079 /* make sure we enable napi before rx interrupt */
\r
1080 napi_enable(&ap->napi);
\r
1083 temp = RXINT_MASK | ERR_MASK | TXCH_MASK | MDIO_MASK;
\r
1084 vmac_writel(ap, temp, ENABLE);
\r
1087 temp = (RX_BDT_LEN << 24) | (TX_BDT_LEN << 16) | TXRN_MASK | RXRN_MASK;
\r
1088 vmac_writel(ap, temp, CONTROL);
\r
1090 /* enable, after all other bits are set */
\r
1091 vmac_writel(ap, temp | EN_MASK, CONTROL);
\r
1093 netif_start_queue(dev);
\r
1094 netif_carrier_off(dev);
\r
1097 vmac_register_print(dev);
\r
1100 /* register the PHY board fixup, if needed */
\r
1101 err = vmac_mii_init(ap);
\r
1103 goto err_free_irq;
\r
1105 /* schedule a link state check */
\r
1106 phy_start(ap->phy_dev);
\r
1108 phydev = ap->phy_dev;
\r
1109 dev_info(&ap->pdev->dev, "PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
\r
1110 phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
\r
1115 free_irq(dev->irq, dev);
\r
1117 free_buffers(dev);
\r
1119 if (arm_clk && mac_parent && (arm_clk == mac_parent))
\r
1120 wake_unlock(&idlelock);
\r
1125 int vmac_close(struct net_device *dev)
\r
1127 struct vmac_priv *ap = netdev_priv(dev);
\r
1128 unsigned int temp;
\r
1129 struct clk *mac_clk = NULL;
\r
1130 struct clk *arm_clk = NULL;
\r
1131 struct clk *mac_parent = NULL;
\r
1132 struct rk29_vmac_platform_data *pdata = ap->pdev->dev.platform_data;
\r
1134 netif_stop_queue(dev);
\r
1135 napi_disable(&ap->napi);
\r
1137 /* stop running transfers */
\r
1138 temp = vmac_readl(ap, CONTROL);
\r
1139 temp &= ~(TXRN_MASK | RXRN_MASK);
\r
1140 vmac_writel(ap, temp, CONTROL);
\r
1142 del_timer_sync(&ap->rx_timeout);
\r
1145 phy_stop(ap->phy_dev);
\r
1146 vmac_mii_exit(dev);
\r
1147 netif_carrier_off(dev);
\r
1149 /* disable interrupts */
\r
1150 vmac_writel(ap, 0, ENABLE);
\r
1151 free_irq(dev->irq, dev);
\r
1153 /* turn off vmac */
\r
1154 vmac_writel(ap, 0, CONTROL);
\r
1155 /* vmac_reset_hw(vmac) */
\r
1160 free_buffers(dev);
\r
1163 if (pdata && pdata->rmii_power_control)
\r
1164 pdata->rmii_power_control(0);
\r
1167 mac_clk = clk_get(NULL, "mac_ref_div");
\r
1168 mac_parent = clk_get_parent(mac_clk);
\r
1169 arm_clk = clk_get(NULL, "arm_pll");
\r
1171 if (arm_clk && mac_parent && (arm_clk == mac_parent))
\r
1172 wake_unlock(&idlelock);
\r
1174 clk_disable(clk_get(NULL,"mii_rx"));
\r
1175 clk_disable(clk_get(NULL,"mii_tx"));
\r
1176 clk_disable(clk_get(NULL,"hclk_mac"));
\r
1177 clk_disable(clk_get(NULL,"mac_ref"));
\r
1182 void vmac_update_stats(struct vmac_priv *ap)
\r
1184 struct net_device_stats *_stats = &ap->stats;
\r
1185 unsigned long miss, rxerr;
\r
1186 unsigned long rxfram, rxcrc, rxoflow;
\r
1188 /* compare with /proc/net/dev,
\r
1189 * see net/core/dev.c:dev_seq_printf_stats */
\r
1192 rxerr = vmac_readl(ap, RXERR);
\r
1193 miss = vmac_readl(ap, MISS);
\r
1195 rxcrc = (rxerr & RXERR_CRC);
\r
1196 rxfram = (rxerr & RXERR_FRM) >> 8;
\r
1197 rxoflow = (rxerr & RXERR_OFLO) >> 16;
\r
1199 _stats->rx_length_errors = 0;
\r
1200 _stats->rx_over_errors += miss;
\r
1201 _stats->rx_crc_errors += rxcrc;
\r
1202 _stats->rx_frame_errors += rxfram;
\r
1203 _stats->rx_fifo_errors += rxoflow;
\r
1204 _stats->rx_missed_errors = 0;
\r
1206 /* TODO check rx_dropped/rx_errors/tx_dropped/tx_errors have not
\r
1207 * been updated elsewhere */
\r
1208 _stats->rx_dropped = _stats->rx_over_errors +
\r
1209 _stats->rx_fifo_errors +
\r
1210 ap->rx_merge_error;
\r
1212 _stats->rx_errors = _stats->rx_length_errors + _stats->rx_crc_errors +
\r
1213 _stats->rx_frame_errors +
\r
1214 _stats->rx_missed_errors +
\r
1215 _stats->rx_dropped;
\r
1218 _stats->tx_dropped = 0; /* otherwise queue stopped */
\r
1220 _stats->tx_errors = _stats->tx_aborted_errors +
\r
1221 _stats->tx_carrier_errors +
\r
1222 _stats->tx_fifo_errors +
\r
1223 _stats->tx_heartbeat_errors +
\r
1224 _stats->tx_window_errors +
\r
1225 _stats->tx_dropped +
\r
1226 ap->tx_timeout_error;
\r
1229 struct net_device_stats *vmac_stats(struct net_device *dev)
\r
1231 struct vmac_priv *ap = netdev_priv(dev);
\r
1232 unsigned long flags;
\r
1234 spin_lock_irqsave(&ap->lock, flags);
\r
1235 vmac_update_stats(ap);
\r
1236 spin_unlock_irqrestore(&ap->lock, flags);
\r
1238 return &ap->stats;
\r
1241 void vmac_tx_timeout(struct net_device *dev)
\r
1243 struct vmac_priv *ap = netdev_priv(dev);
\r
1244 unsigned int status;
\r
1245 unsigned long flags;
\r
1247 spin_lock_irqsave(&ap->lock, flags);
\r
1249 /* queue did not progress for timeo jiffies */
\r
1250 WARN_ON(!netif_queue_stopped(dev));
\r
1251 WARN_ON(!fifo_full(&ap->tx_ring));
\r
1253 /* TX IRQ lost? */
\r
1254 status = vmac_readl(ap, STAT);
\r
1255 if (status & TXINT_MASK) {
\r
1256 dev_err(&ap->pdev->dev, "lost tx interrupt, IRQ mask %x\n",
\r
1257 vmac_readl(ap, ENABLE));
\r
1258 vmac_writel(ap, TXINT_MASK, STAT);
\r
1261 /* TODO RX/MDIO/ERR as well? */
\r
1263 vmac_tx_reclaim(dev, 0);
\r
1264 if (fifo_full(&ap->tx_ring))
\r
1265 dev_err(&ap->pdev->dev, "DMA state machine not active\n");
\r
1267 /* We can accept TX packets again */
\r
1268 ap->tx_timeout_error++;
\r
1269 dev->trans_start = jiffies;
\r
1270 netif_wake_queue(dev);
\r
1272 spin_unlock_irqrestore(&ap->lock, flags);
\r
1275 static void create_multicast_filter(struct net_device *dev,
\r
1276 unsigned long *bitmask)
\r
1279 struct netdev_hw_addr *ha;
\r
1280 unsigned long crc;
\r
1282 struct netdev_hw_addr_list *list = &dev->dev_addrs;
\r
1284 //printk("-----------------func %s-------------------\n", __func__);
\r
1286 WARN_ON(dev->mc_count == 0);
\r
1287 WARN_ON(dev->flags & IFF_ALLMULTI);
\r
1289 bitmask[0] = bitmask[1] = 0;
\r
1291 list_for_each_entry(ha, &list->list, list) {
\r
1294 /* skip non-multicast addresses */
\r
1295 if (!(*addrs & 1))
\r
1298 crc = ether_crc_le(ETH_ALEN, addrs);
\r
1299 set_bit(crc >> 26, bitmask);
\r
1303 struct netdev_hw_addr *ha;
\r
1304 unsigned long crc;
\r
1307 WARN_ON(netdev_mc_count(dev) == 0);
\r
1308 WARN_ON(dev->flags & IFF_ALLMULTI);
\r
1310 bitmask[0] = bitmask[1] = 0;
\r
1312 netdev_for_each_mc_addr(ha, dev) {
\r
1315 /* skip non-multicast addresses */
\r
1316 if (!(*addrs & 1))
\r
1319 crc = ether_crc_le(ETH_ALEN, addrs);
\r
1320 set_bit(crc >> 26, bitmask);
\r
1324 static void vmac_set_multicast_list(struct net_device *dev)
\r
1327 struct vmac_priv *ap = netdev_priv(dev);
\r
1328 unsigned long flags, bitmask[2];
\r
1331 //printk("-----------------func %s-------------------\n", __func__);
\r
1333 spin_lock_irqsave(&ap->lock, flags);
\r
1335 promisc = !!(dev->flags & IFF_PROMISC);
\r
1336 reg = vmac_readl(ap, ENABLE);
\r
1337 if (promisc != !!(reg & PROM_MASK)) {
\r
1339 vmac_writel(ap, reg, ENABLE);
\r
1342 if (dev->flags & IFF_ALLMULTI)
\r
1343 memset(bitmask, 1, sizeof(bitmask));
\r
1344 else if (dev->mc_count == 0)
\r
1345 memset(bitmask, 0, sizeof(bitmask));
\r
1347 create_multicast_filter(dev, bitmask);
\r
1349 vmac_writel(ap, bitmask[0], LAFL);
\r
1350 vmac_writel(ap, bitmask[1], LAFH);
\r
1352 spin_unlock_irqrestore(&ap->lock, flags);
\r
1354 struct vmac_priv *ap = netdev_priv(dev);
\r
1355 unsigned long flags, bitmask[2];
\r
1358 spin_lock_irqsave(&ap->lock, flags);
\r
1360 promisc = !!(dev->flags & IFF_PROMISC);
\r
1361 reg = vmac_readl(ap, ENABLE);
\r
1362 if (promisc != !!(reg & PROM_MASK)) {
\r
1364 vmac_writel(ap, reg, ENABLE);
\r
1367 if (dev->flags & IFF_ALLMULTI)
\r
1368 memset(bitmask, 1, sizeof(bitmask));
\r
1369 else if (netdev_mc_count(dev) == 0)
\r
1370 memset(bitmask, 0, sizeof(bitmask));
\r
1372 create_multicast_filter(dev, bitmask);
\r
1374 vmac_writel(ap, bitmask[0], LAFL);
\r
1375 vmac_writel(ap, bitmask[1], LAFH);
\r
1377 spin_unlock_irqrestore(&ap->lock, flags);
\r
1381 static struct ethtool_ops vmac_ethtool_ops = {
\r
1382 .get_settings = vmacether_get_settings,
\r
1383 .set_settings = vmacether_set_settings,
\r
1384 .get_drvinfo = vmacether_get_drvinfo,
\r
1385 .get_link = ethtool_op_get_link,
\r
1388 static const struct net_device_ops vmac_netdev_ops = {
\r
1389 .ndo_open = vmac_open,
\r
1390 .ndo_stop = vmac_close,
\r
1391 .ndo_get_stats = vmac_stats,
\r
1392 .ndo_start_xmit = vmac_start_xmit,
\r
1393 .ndo_do_ioctl = vmac_ioctl,
\r
1394 .ndo_set_mac_address = eth_mac_addr,
\r
1395 .ndo_tx_timeout = vmac_tx_timeout,
\r
1396 .ndo_set_multicast_list = vmac_set_multicast_list,
\r
1397 .ndo_validate_addr = eth_validate_addr,
\r
1398 .ndo_change_mtu = eth_change_mtu,
\r
1401 static int __devinit vmac_probe(struct platform_device *pdev)
\r
1403 struct net_device *dev;
\r
1404 struct vmac_priv *ap;
\r
1405 struct resource *res;
\r
1406 unsigned int mem_base, mem_size, irq;
\r
1408 struct clk *sys_clk;
\r
1409 struct rk29_vmac_platform_data *pdata = pdev->dev.platform_data;
\r
1411 dev = alloc_etherdev(sizeof(*ap));
\r
1413 dev_err(&pdev->dev, "etherdev alloc failed, aborting.\n");
\r
1417 ap = netdev_priv(dev);
\r
1420 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
\r
1422 dev_err(&pdev->dev, "no mmio resource defined\n");
\r
1425 mem_base = res->start;
\r
1426 mem_size = resource_size(res);
\r
1427 irq = platform_get_irq(pdev, 0);
\r
1430 if (!request_mem_region(mem_base, mem_size, VMAC_NAME)) {
\r
1431 dev_err(&pdev->dev, "no memory region available\n");
\r
1436 ap->regs = ioremap(mem_base, mem_size);
\r
1438 dev_err(&pdev->dev, "failed to map registers, aborting.\n");
\r
1439 goto err_out_release_mem;
\r
1442 /* no checksum support, hence no scatter/gather */
\r
1443 dev->features |= NETIF_F_HIGHDMA;
\r
1445 spin_lock_init(&ap->lock);
\r
1447 SET_NETDEV_DEV(dev, &pdev->dev);
\r
1451 /* init rx timeout (used for oom) */
\r
1452 init_timer(&ap->rx_timeout);
\r
1453 ap->rx_timeout.function = vmac_refill_rx_timer;
\r
1454 ap->rx_timeout.data = (unsigned long)dev;
\r
1456 netif_napi_add(dev, &ap->napi, vmac_poll, 2);
\r
1457 dev->netdev_ops = &vmac_netdev_ops;
\r
1458 dev->ethtool_ops = &vmac_ethtool_ops;
\r
1461 dev->flags |= IFF_MULTICAST;////////////////////
\r
1463 dev->base_addr = (unsigned long)ap->regs;
\r
1464 ap->mem_base = mem_base;
\r
1466 /* prevent buffer chaining, favor speed over space */
\r
1467 ap->rx_skb_size = ETH_FRAME_LEN + VMAC_BUFFER_PAD;
\r
1469 /* private struct functional */
\r
1471 /* mac address intialize, set vmac_open */
\r
1472 read_mac_reg(dev, dev->dev_addr);
\r
1474 if (!is_valid_ether_addr(dev->dev_addr))
\r
1475 random_ether_addr(dev->dev_addr);
\r
1477 err = register_netdev(dev);
\r
1479 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
\r
1480 goto err_out_iounmap;
\r
1483 dev_info(&pdev->dev, "ARC VMAC at 0x%08x irq %d %pM\n", mem_base,
\r
1484 dev->irq, dev->dev_addr);
\r
1485 platform_set_drvdata(pdev, dev);
\r
1487 wake_lock_init(&idlelock, WAKE_LOCK_IDLE, "vmac");
\r
1489 //config rk29 vmac as rmii, 100MHz
\r
1490 if (pdata && pdata->vmac_register_set)
\r
1491 pdata->vmac_register_set();
\r
1493 //power gpio init, phy power off default for power reduce
\r
1494 if (pdata && pdata->rmii_io_init)
\r
1495 pdata->rmii_io_init();
\r
1500 iounmap(ap->regs);
\r
1501 err_out_release_mem:
\r
1502 release_mem_region(mem_base, mem_size);
\r
1508 static int __devexit vmac_remove(struct platform_device *pdev)
\r
1510 struct net_device *dev;
\r
1511 struct vmac_priv *ap;
\r
1512 struct resource *res;
\r
1513 struct rk29_vmac_platform_data *pdata = pdev->dev.platform_data;
\r
1515 wake_lock_destroy(&idlelock);
\r
1517 //power gpio deinit, phy power off
\r
1518 if (pdata && pdata->rmii_io_deinit)
\r
1519 pdata->rmii_io_deinit();
\r
1521 dev = platform_get_drvdata(pdev);
\r
1523 dev_err(&pdev->dev, "%s no valid dev found\n", __func__);
\r
1527 ap = netdev_priv(dev);
\r
1530 unregister_netdev(dev);
\r
1531 iounmap(ap->regs);
\r
1533 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
\r
1534 release_mem_region(res->start, resource_size(res));
\r
1536 platform_set_drvdata(pdev, NULL);
\r
1541 static struct platform_driver rk29_vmac_driver = {
\r
1542 .probe = vmac_probe,
\r
1543 .remove = __devexit_p(vmac_remove),
\r
1545 .name = "rk29 vmac",
\r
1549 static int __init vmac_init(void)
\r
1551 return platform_driver_register(&rk29_vmac_driver);
\r
1554 static void __exit vmac_exit(void)
\r
1556 platform_driver_unregister(&rk29_vmac_driver);
\r
1559 module_init(vmac_init);
\r
1560 module_exit(vmac_exit);
\r
1562 MODULE_LICENSE("GPL");
\r
1563 MODULE_DESCRIPTION("RK29 VMAC Ethernet driver");
\r
1564 MODULE_AUTHOR("amit.bhor@celunite.com, sameer.dhavale@celunite.com, andreas.fenkart@streamunlimited.com");
\r