/*
 * Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of_device.h>
#include <linux/of_net.h>
#include <linux/pinctrl/consumer.h>
#define RX_BUFFER_SIZE		128
#define RX_RING_SIZE		512 /* must be power of 2 */
#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)

#define TX_RING_SIZE		128 /* must be power of 2 */
#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)

/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
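/*
 * A quick illustration of the arithmetic behind these constants: with
 * the power-of-2 ring sizes above, the <linux/circ_buf.h> helpers
 * reduce to mask arithmetic, e.g. CIRC_CNT(head, tail, size) is
 * (head - tail) & (size - 1). With TX_RING_SIZE = 128 the wakeup
 * threshold evaluates to 96, so a stopped queue is restarted once no
 * more than 96 descriptors are still in flight, i.e. once at least a
 * quarter of the ring has drained.
 */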
#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
				 | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
				 | MACB_BIT(ISR_RLE)			\
				 | MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
/*
 * Graceful stop timeouts in us. We should allow up to
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 */
#define MACB_HALT_TIMEOUT	1230
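/*
 * Where 1230 presumably comes from: a maximum-length frame of 1518
 * octets plus 8 octets of preamble/SFD and a 12-octet inter-frame gap
 * is 1538 octets, i.e. 12304 bit times, which at 10 Mbit/s is about
 * 1230 us.
 */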
/* Ring buffer accessors */
static unsigned int macb_tx_ring_wrap(unsigned int index)
{
    return index & (TX_RING_SIZE - 1);
}

static struct macb_dma_desc *macb_tx_desc(struct macb *bp, unsigned int index)
{
    return &bp->tx_ring[macb_tx_ring_wrap(index)];
}

static struct macb_tx_skb *macb_tx_skb(struct macb *bp, unsigned int index)
{
    return &bp->tx_skb[macb_tx_ring_wrap(index)];
}

static dma_addr_t macb_tx_dma(struct macb *bp, unsigned int index)
{
    dma_addr_t offset;

    offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);
    return bp->tx_ring_dma + offset;
}

static unsigned int macb_rx_ring_wrap(unsigned int index)
{
    return index & (RX_RING_SIZE - 1);
}

static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
{
    return &bp->rx_ring[macb_rx_ring_wrap(index)];
}

static void *macb_rx_buffer(struct macb *bp, unsigned int index)
{
    return bp->rx_buffers + RX_BUFFER_SIZE * macb_rx_ring_wrap(index);
}
void macb_set_hwaddr(struct macb *bp)
{
    u32 bottom;
    u16 top;

    bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
    macb_or_gem_writel(bp, SA1B, bottom);
    top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
    macb_or_gem_writel(bp, SA1T, top);

    /* Clear unused address register sets */
    macb_or_gem_writel(bp, SA2B, 0);
    macb_or_gem_writel(bp, SA2T, 0);
    macb_or_gem_writel(bp, SA3B, 0);
    macb_or_gem_writel(bp, SA3T, 0);
    macb_or_gem_writel(bp, SA4B, 0);
    macb_or_gem_writel(bp, SA4T, 0);
}
EXPORT_SYMBOL_GPL(macb_set_hwaddr);
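/*
 * Layout sketch, derived from the code above: the first four address
 * bytes land in SA1B as a little-endian word and the last two in SA1T,
 * so for 00:11:22:33:44:55 on a little-endian CPU the driver writes
 * SA1B = 0x33221100 and SA1T = 0x5544.
 */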
void macb_get_hwaddr(struct macb *bp)
{
    struct macb_platform_data *pdata;
    u32 bottom;
    u16 top;
    u8 addr[6];
    int i;

    pdata = bp->pdev->dev.platform_data;

    /* Check all 4 address register sets for a valid address */
    for (i = 0; i < 4; i++) {
        bottom = macb_or_gem_readl(bp, SA1B + i * 8);
        top = macb_or_gem_readl(bp, SA1T + i * 8);

        if (pdata && pdata->rev_eth_addr) {
            addr[5] = bottom & 0xff;
            addr[4] = (bottom >> 8) & 0xff;
            addr[3] = (bottom >> 16) & 0xff;
            addr[2] = (bottom >> 24) & 0xff;
            addr[1] = top & 0xff;
            addr[0] = (top & 0xff00) >> 8;
        } else {
            addr[0] = bottom & 0xff;
            addr[1] = (bottom >> 8) & 0xff;
            addr[2] = (bottom >> 16) & 0xff;
            addr[3] = (bottom >> 24) & 0xff;
            addr[4] = top & 0xff;
            addr[5] = (top >> 8) & 0xff;
        }

        if (is_valid_ether_addr(addr)) {
            memcpy(bp->dev->dev_addr, addr, sizeof(addr));
            return;
        }
    }

    netdev_info(bp->dev, "invalid hw address, using random\n");
    eth_hw_addr_random(bp->dev);
}
EXPORT_SYMBOL_GPL(macb_get_hwaddr);
static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
    struct macb *bp = bus->priv;
    int value;

    macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
                          | MACB_BF(RW, MACB_MAN_READ)
                          | MACB_BF(PHYA, mii_id)
                          | MACB_BF(REGA, regnum)
                          | MACB_BF(CODE, MACB_MAN_CODE)));

    /* wait for end of transfer */
    while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
        cpu_relax();

    value = MACB_BFEXT(DATA, macb_readl(bp, MAN));

    return value;
}

static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
                           u16 value)
{
    struct macb *bp = bus->priv;

    macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
                          | MACB_BF(RW, MACB_MAN_WRITE)
                          | MACB_BF(PHYA, mii_id)
                          | MACB_BF(REGA, regnum)
                          | MACB_BF(CODE, MACB_MAN_CODE)
                          | MACB_BF(DATA, value)));

    /* wait for end of transfer */
    while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
        cpu_relax();

    return 0;
}

static int macb_mdio_reset(struct mii_bus *bus)
{
    return 0;
}
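/*
 * Worked example, assuming the Clause 22 frame field offsets defined
 * in macb.h (SOF at bit 30, RW at 28, PHYA at 23, REGA at 18, CODE at
 * 16): reading register 1 (BMSR) of the PHY at address 0 writes
 *   MAN = (1 << 30) | (2 << 28) | (0 << 23) | (1 << 18) | (2 << 16)
 *       = 0x60060000,
 * then polls NSR.IDLE before pulling the 16-bit result out of MAN.DATA.
 */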
static void macb_handle_link_change(struct net_device *dev)
{
    struct macb *bp = netdev_priv(dev);
    struct phy_device *phydev = bp->phy_dev;
    unsigned long flags;
    int status_change = 0;

    spin_lock_irqsave(&bp->lock, flags);

    if (phydev->link) {
        if ((bp->speed != phydev->speed) ||
            (bp->duplex != phydev->duplex)) {
            u32 reg;

            reg = macb_readl(bp, NCFGR);
            reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
            if (macb_is_gem(bp))
                reg &= ~GEM_BIT(GBE);
            if (phydev->duplex)
                reg |= MACB_BIT(FD);
            if (phydev->speed == SPEED_100)
                reg |= MACB_BIT(SPD);
            if (phydev->speed == SPEED_1000)
                reg |= GEM_BIT(GBE);

            macb_or_gem_writel(bp, NCFGR, reg);

            bp->speed = phydev->speed;
            bp->duplex = phydev->duplex;
            status_change = 1;
        }
    }

    if (phydev->link != bp->link) {
        if (!phydev->link) {
            bp->speed = 0;
            bp->duplex = -1;
        }
        bp->link = phydev->link;
        status_change = 1;
    }

    spin_unlock_irqrestore(&bp->lock, flags);

    if (status_change) {
        if (phydev->link) {
            netif_carrier_on(dev);
            netdev_info(dev, "link up (%d/%s)\n",
                        phydev->speed,
                        phydev->duplex == DUPLEX_FULL ?
                        "Full" : "Half");
        } else {
            netif_carrier_off(dev);
            netdev_info(dev, "link down\n");
        }
    }
}
/* based on au1000_eth.c */
static int macb_mii_probe(struct net_device *dev)
{
    struct macb *bp = netdev_priv(dev);
    struct macb_platform_data *pdata;
    struct phy_device *phydev;
    int phy_irq;
    int ret;

    phydev = phy_find_first(bp->mii_bus);
    if (!phydev) {
        netdev_err(dev, "no PHY found\n");
        return -ENXIO;
    }

    pdata = dev_get_platdata(&bp->pdev->dev);
    if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
        ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int");
        if (!ret) {
            phy_irq = gpio_to_irq(pdata->phy_irq_pin);
            phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
        }
    }

    /* attach the mac to the phy */
    ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
                             bp->phy_interface);
    if (ret) {
        netdev_err(dev, "Could not attach to PHY\n");
        return ret;
    }

    /* mask with MAC supported features */
    if (macb_is_gem(bp))
        phydev->supported &= PHY_GBIT_FEATURES;
    else
        phydev->supported &= PHY_BASIC_FEATURES;

    phydev->advertising = phydev->supported;

    bp->link = 0;
    bp->speed = 0;
    bp->duplex = -1;
    bp->phy_dev = phydev;

    return 0;
}
int macb_mii_init(struct macb *bp)
{
    struct macb_platform_data *pdata;
    int err = -ENXIO, i;

    /* Enable management port */
    macb_writel(bp, NCR, MACB_BIT(MPE));

    bp->mii_bus = mdiobus_alloc();
    if (bp->mii_bus == NULL) {
        err = -ENOMEM;
        goto err_out;
    }

    bp->mii_bus->name = "MACB_mii_bus";
    bp->mii_bus->read = &macb_mdio_read;
    bp->mii_bus->write = &macb_mdio_write;
    bp->mii_bus->reset = &macb_mdio_reset;
    snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
             bp->pdev->name, bp->pdev->id);
    bp->mii_bus->priv = bp;
    bp->mii_bus->parent = &bp->dev->dev;
    pdata = bp->pdev->dev.platform_data;
    if (pdata)
        bp->mii_bus->phy_mask = pdata->phy_mask;

    bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
    if (!bp->mii_bus->irq) {
        err = -ENOMEM;
        goto err_out_free_mdiobus;
    }

    for (i = 0; i < PHY_MAX_ADDR; i++)
        bp->mii_bus->irq[i] = PHY_POLL;

    dev_set_drvdata(&bp->dev->dev, bp->mii_bus);

    if (mdiobus_register(bp->mii_bus))
        goto err_out_free_mdio_irq;

    if (macb_mii_probe(bp->dev) != 0)
        goto err_out_unregister_bus;

    return 0;

err_out_unregister_bus:
    mdiobus_unregister(bp->mii_bus);
err_out_free_mdio_irq:
    kfree(bp->mii_bus->irq);
err_out_free_mdiobus:
    mdiobus_free(bp->mii_bus);
err_out:
    return err;
}
EXPORT_SYMBOL_GPL(macb_mii_init);
static void macb_update_stats(struct macb *bp)
{
    u32 __iomem *reg = bp->regs + MACB_PFR;
    u32 *p = &bp->hw_stats.macb.rx_pause_frames;
    u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;

    WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);

    for (; p < end; p++, reg++)
        *p += __raw_readl(reg);
}

static int macb_halt_tx(struct macb *bp)
{
    unsigned long halt_time, timeout;
    u32 status;

    macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));

    timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
    do {
        halt_time = jiffies;
        status = macb_readl(bp, TSR);
        if (!(status & MACB_BIT(TGO)))
            return 0;

        usleep_range(10, 250);
    } while (time_before(halt_time, timeout));

    return -ETIMEDOUT;
}
static void macb_tx_error_task(struct work_struct *work)
{
    struct macb *bp = container_of(work, struct macb, tx_error_task);
    struct macb_tx_skb *tx_skb;
    struct sk_buff *skb;
    unsigned int tail;

    netdev_vdbg(bp->dev, "macb_tx_error_task: t = %u, h = %u\n",
                bp->tx_tail, bp->tx_head);

    /* Make sure nobody is trying to queue up new packets */
    netif_stop_queue(bp->dev);

    /*
     * Stop transmission now
     * (in case we have just queued new packets)
     */
    if (macb_halt_tx(bp))
        /* Just complain for now, reinitializing TX path can be good */
        netdev_err(bp->dev, "BUG: halt tx timed out\n");

    /* No need for the lock here as nobody will interrupt us anymore */

    /*
     * Treat frames in TX queue including the ones that caused the error.
     * Free transmit buffers in upper layer.
     */
    for (tail = bp->tx_tail; tail != bp->tx_head; tail++) {
        struct macb_dma_desc *desc;
        u32 ctrl;

        desc = macb_tx_desc(bp, tail);
        ctrl = desc->ctrl;
        tx_skb = macb_tx_skb(bp, tail);
        skb = tx_skb->skb;

        if (ctrl & MACB_BIT(TX_USED)) {
            netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
                        macb_tx_ring_wrap(tail), skb->data);
            bp->stats.tx_packets++;
            bp->stats.tx_bytes += skb->len;
        } else {
            /*
             * "Buffers exhausted mid-frame" errors may only happen
             * if the driver is buggy, so complain loudly about those.
             * Statistics are updated by hardware.
             */
            if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
                netdev_err(bp->dev,
                           "BUG: TX buffers exhausted mid-frame\n");

            desc->ctrl = ctrl | MACB_BIT(TX_USED);
        }

        dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
                         DMA_TO_DEVICE);
        tx_skb->skb = NULL;
        dev_kfree_skb(skb);
    }

    /* Make descriptor updates visible to hardware */
    wmb();

    /* Reinitialize the TX desc queue */
    macb_writel(bp, TBQP, bp->tx_ring_dma);
    /* Make TX ring reflect state of hardware */
    bp->tx_head = bp->tx_tail = 0;

    /* Now we are ready to start transmission again */
    netif_wake_queue(bp->dev);

    /* Housework before enabling TX IRQ */
    macb_writel(bp, TSR, macb_readl(bp, TSR));
    macb_writel(bp, IER, MACB_TX_INT_FLAGS);
}
static void macb_tx_interrupt(struct macb *bp)
{
    unsigned int tail;
    unsigned int head;
    u32 status;

    status = macb_readl(bp, TSR);
    macb_writel(bp, TSR, status);

    if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
        macb_writel(bp, ISR, MACB_BIT(TCOMP));

    netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
                (unsigned long)status);

    head = bp->tx_head;
    for (tail = bp->tx_tail; tail != head; tail++) {
        struct macb_tx_skb *tx_skb;
        struct sk_buff *skb;
        struct macb_dma_desc *desc;
        u32 ctrl;

        desc = macb_tx_desc(bp, tail);

        /* Make hw descriptor updates visible to CPU */
        rmb();

        ctrl = desc->ctrl;
        if (!(ctrl & MACB_BIT(TX_USED)))
            break;

        tx_skb = macb_tx_skb(bp, tail);
        skb = tx_skb->skb;

        netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
                    macb_tx_ring_wrap(tail), skb->data);
        dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
                         DMA_TO_DEVICE);
        bp->stats.tx_packets++;
        bp->stats.tx_bytes += skb->len;
        tx_skb->skb = NULL;
        dev_kfree_skb_irq(skb);
    }

    bp->tx_tail = tail;
    if (netif_queue_stopped(bp->dev)
            && CIRC_CNT(bp->tx_head, bp->tx_tail,
                        TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
        netif_wake_queue(bp->dev);
}
static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
                         unsigned int last_frag)
{
    unsigned int len;
    unsigned int frag;
    unsigned int offset;
    struct sk_buff *skb;
    struct macb_dma_desc *desc;

    desc = macb_rx_desc(bp, last_frag);
    len = MACB_BFEXT(RX_FRMLEN, desc->ctrl);

    netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
                macb_rx_ring_wrap(first_frag),
                macb_rx_ring_wrap(last_frag), len);

    /*
     * The ethernet header starts NET_IP_ALIGN bytes into the
     * first buffer. Since the header is 14 bytes, this makes the
     * payload word-aligned.
     *
     * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
     * the two padding bytes into the skb so that we avoid hitting
     * the slowpath in memcpy(), and pull them off afterwards.
     */
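    /*
     * Concretely: with NET_IP_ALIGN == 2 the 14-byte Ethernet header
     * occupies skb bytes 2..15, so the IP header that follows begins
     * at offset 16, a 4-byte boundary.
     */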
    skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
    if (!skb) {
        bp->stats.rx_dropped++;
        for (frag = first_frag; ; frag++) {
            desc = macb_rx_desc(bp, frag);
            desc->addr &= ~MACB_BIT(RX_USED);
            if (frag == last_frag)
                break;
        }

        /* Make descriptor updates visible to hardware */
        wmb();

        return 1;
    }

    offset = 0;
    len += NET_IP_ALIGN;
    skb_checksum_none_assert(skb);
    skb_put(skb, len);

    for (frag = first_frag; ; frag++) {
        unsigned int frag_len = RX_BUFFER_SIZE;

        if (offset + frag_len > len) {
            BUG_ON(frag != last_frag);
            frag_len = len - offset;
        }
        skb_copy_to_linear_data_offset(skb, offset,
                macb_rx_buffer(bp, frag), frag_len);
        offset += RX_BUFFER_SIZE;
        desc = macb_rx_desc(bp, frag);
        desc->addr &= ~MACB_BIT(RX_USED);

        if (frag == last_frag)
            break;
    }

    /* Make descriptor updates visible to hardware */
    wmb();

    __skb_pull(skb, NET_IP_ALIGN);
    skb->protocol = eth_type_trans(skb, bp->dev);

    bp->stats.rx_packets++;
    bp->stats.rx_bytes += skb->len;
    netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
                skb->len, skb->csum);
    netif_receive_skb(skb);

    return 0;
}
/* Mark DMA descriptors from begin up to and not including end as unused */
static void discard_partial_frame(struct macb *bp, unsigned int begin,
                                  unsigned int end)
{
    unsigned int frag;

    for (frag = begin; frag != end; frag++) {
        struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
        desc->addr &= ~MACB_BIT(RX_USED);
    }

    /* Make descriptor updates visible to hardware */
    wmb();

    /*
     * When this happens, the hardware stats registers for
     * whatever caused this are updated, so we don't have to record
     * anything.
     */
}
static int macb_rx(struct macb *bp, int budget)
{
    int received = 0;
    unsigned int tail;
    int first_frag = -1;

    for (tail = bp->rx_tail; budget > 0; tail++) {
        struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
        u32 addr, ctrl;

        /* Make hw descriptor updates visible to CPU */
        rmb();

        addr = desc->addr;
        ctrl = desc->ctrl;

        if (!(addr & MACB_BIT(RX_USED)))
            break;

        if (ctrl & MACB_BIT(RX_SOF)) {
            if (first_frag != -1)
                discard_partial_frame(bp, first_frag, tail);
            first_frag = tail;
        }

        if (ctrl & MACB_BIT(RX_EOF)) {
            int dropped;
            BUG_ON(first_frag == -1);

            dropped = macb_rx_frame(bp, first_frag, tail);
            first_frag = -1;
            if (!dropped) {
                received++;
                budget--;
            }
        }
    }

    if (first_frag != -1)
        bp->rx_tail = first_frag;
    else
        bp->rx_tail = tail;

    return received;
}
static int macb_poll(struct napi_struct *napi, int budget)
{
    struct macb *bp = container_of(napi, struct macb, napi);
    int work_done;
    u32 status;

    status = macb_readl(bp, RSR);
    macb_writel(bp, RSR, status);

    netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
                (unsigned long)status, budget);

    work_done = macb_rx(bp, budget);
    if (work_done < budget) {
        napi_complete(napi);

        /*
         * We've done what we can to clean the buffers. Make sure we
         * get notified when new packets arrive.
         */
        macb_writel(bp, IER, MACB_RX_INT_FLAGS);

        /* Packets received while interrupts were disabled */
        status = macb_readl(bp, RSR);
        if (unlikely(status))
            napi_reschedule(napi);
    }

    /* TODO: Handle errors */

    return work_done;
}
static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
    struct net_device *dev = dev_id;
    struct macb *bp = netdev_priv(dev);
    u32 status;

    status = macb_readl(bp, ISR);

    if (unlikely(!status))
        return IRQ_NONE;

    spin_lock(&bp->lock);

    while (status) {
        /* close possible race with dev_close */
        if (unlikely(!netif_running(dev))) {
            macb_writel(bp, IDR, -1);
            break;
        }

        netdev_vdbg(bp->dev, "isr = 0x%08lx\n", (unsigned long)status);

        if (status & MACB_RX_INT_FLAGS) {
            /*
             * There's no point taking any more interrupts
             * until we have processed the buffers. The
             * scheduling call may fail if the poll routine
             * is already scheduled, so disable interrupts
             * now.
             */
            macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
            if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
                macb_writel(bp, ISR, MACB_BIT(RCOMP));

            if (napi_schedule_prep(&bp->napi)) {
                netdev_vdbg(bp->dev, "scheduling RX softirq\n");
                __napi_schedule(&bp->napi);
            }
        }

        if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
            macb_writel(bp, IDR, MACB_TX_INT_FLAGS);
            schedule_work(&bp->tx_error_task);
            break;
        }

        if (status & MACB_BIT(TCOMP))
            macb_tx_interrupt(bp);

        /*
         * Link change detection isn't possible with RMII, so we'll
         * add that if/when we get our hands on a full-blown MII PHY.
         */

        if (status & MACB_BIT(ISR_ROVR)) {
            /* We missed at least one packet */
            if (macb_is_gem(bp))
                bp->hw_stats.gem.rx_overruns++;
            else
                bp->hw_stats.macb.rx_overruns++;
        }

        if (status & MACB_BIT(HRESP)) {
            /*
             * TODO: Reset the hardware, and maybe move the
             * netdev_err to a lower-priority context as well
             * (work queue?)
             */
            netdev_err(dev, "DMA bus error: HRESP not OK\n");
        }

        status = macb_readl(bp, ISR);
    }

    spin_unlock(&bp->lock);

    return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void macb_poll_controller(struct net_device *dev)
{
    unsigned long flags;

    local_irq_save(flags);
    macb_interrupt(dev->irq, dev);
    local_irq_restore(flags);
}
#endif
static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct macb *bp = netdev_priv(dev);
    dma_addr_t mapping;
    unsigned int len, entry;
    struct macb_dma_desc *desc;
    struct macb_tx_skb *tx_skb;
    u32 ctrl;
    unsigned long flags;

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
    netdev_vdbg(bp->dev,
                "start_xmit: len %u head %p data %p tail %p end %p\n",
                skb->len, skb->head, skb->data,
                skb_tail_pointer(skb), skb_end_pointer(skb));
    print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
                   skb->data, 16, true);
#endif

    len = skb->len;
    spin_lock_irqsave(&bp->lock, flags);

    /* This is a hard error, log it. */
    if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1) {
        netif_stop_queue(dev);
        spin_unlock_irqrestore(&bp->lock, flags);
        netdev_err(bp->dev, "BUG! Tx Ring full when queue awake!\n");
        netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
                   bp->tx_head, bp->tx_tail);
        return NETDEV_TX_BUSY;
    }

    entry = macb_tx_ring_wrap(bp->tx_head);
    bp->tx_head++;
    netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry);
    mapping = dma_map_single(&bp->pdev->dev, skb->data,
                             len, DMA_TO_DEVICE);

    tx_skb = &bp->tx_skb[entry];
    tx_skb->skb = skb;
    tx_skb->mapping = mapping;
    netdev_vdbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n",
                skb->data, (unsigned long)mapping);

    ctrl = MACB_BF(TX_FRMLEN, len);
    ctrl |= MACB_BIT(TX_LAST);
    if (entry == (TX_RING_SIZE - 1))
        ctrl |= MACB_BIT(TX_WRAP);

    desc = &bp->tx_ring[entry];
    desc->addr = mapping;
    desc->ctrl = ctrl;

    /* Make newly initialized descriptor visible to hardware */
    wmb();

    skb_tx_timestamp(skb);

    macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

    if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1)
        netif_stop_queue(dev);

    spin_unlock_irqrestore(&bp->lock, flags);

    return NETDEV_TX_OK;
}
static void macb_free_consistent(struct macb *bp)
{
    if (bp->tx_skb) {
        kfree(bp->tx_skb);
        bp->tx_skb = NULL;
    }
    if (bp->rx_ring) {
        dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
                          bp->rx_ring, bp->rx_ring_dma);
        bp->rx_ring = NULL;
    }
    if (bp->tx_ring) {
        dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
                          bp->tx_ring, bp->tx_ring_dma);
        bp->tx_ring = NULL;
    }
    if (bp->rx_buffers) {
        dma_free_coherent(&bp->pdev->dev,
                          RX_RING_SIZE * RX_BUFFER_SIZE,
                          bp->rx_buffers, bp->rx_buffers_dma);
        bp->rx_buffers = NULL;
    }
}
static int macb_alloc_consistent(struct macb *bp)
{
    int size;

    size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
    bp->tx_skb = kmalloc(size, GFP_KERNEL);
    if (!bp->tx_skb)
        goto out_err;

    size = RX_RING_BYTES;
    bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
                                     &bp->rx_ring_dma, GFP_KERNEL);
    if (!bp->rx_ring)
        goto out_err;
    netdev_dbg(bp->dev,
               "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
               size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);

    size = TX_RING_BYTES;
    bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
                                     &bp->tx_ring_dma, GFP_KERNEL);
    if (!bp->tx_ring)
        goto out_err;
    netdev_dbg(bp->dev,
               "Allocated TX ring of %d bytes at %08lx (mapped %p)\n",
               size, (unsigned long)bp->tx_ring_dma, bp->tx_ring);

    size = RX_RING_SIZE * RX_BUFFER_SIZE;
    bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
                                        &bp->rx_buffers_dma, GFP_KERNEL);
    if (!bp->rx_buffers)
        goto out_err;
    netdev_dbg(bp->dev,
               "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
               size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);

    return 0;

out_err:
    macb_free_consistent(bp);
    return -ENOMEM;
}
static void macb_init_rings(struct macb *bp)
{
    int i;
    dma_addr_t addr;

    addr = bp->rx_buffers_dma;
    for (i = 0; i < RX_RING_SIZE; i++) {
        bp->rx_ring[i].addr = addr;
        bp->rx_ring[i].ctrl = 0;
        addr += RX_BUFFER_SIZE;
    }
    bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);

    for (i = 0; i < TX_RING_SIZE; i++) {
        bp->tx_ring[i].addr = 0;
        bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
    }
    bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);

    bp->rx_tail = bp->tx_head = bp->tx_tail = 0;
}
static void macb_reset_hw(struct macb *bp)
{
    /*
     * Disable RX and TX (XXX: Should we halt the transmission
     * more gracefully?)
     */
    macb_writel(bp, NCR, 0);

    /* Clear the stats registers (XXX: Update stats first?) */
    macb_writel(bp, NCR, MACB_BIT(CLRSTAT));

    /* Clear all status flags */
    macb_writel(bp, TSR, -1);
    macb_writel(bp, RSR, -1);

    /* Disable all interrupts */
    macb_writel(bp, IDR, -1);
}
static u32 gem_mdc_clk_div(struct macb *bp)
{
    u32 config;
    unsigned long pclk_hz = clk_get_rate(bp->pclk);

    if (pclk_hz <= 20000000)
        config = GEM_BF(CLK, GEM_CLK_DIV8);
    else if (pclk_hz <= 40000000)
        config = GEM_BF(CLK, GEM_CLK_DIV16);
    else if (pclk_hz <= 80000000)
        config = GEM_BF(CLK, GEM_CLK_DIV32);
    else if (pclk_hz <= 120000000)
        config = GEM_BF(CLK, GEM_CLK_DIV48);
    else if (pclk_hz <= 160000000)
        config = GEM_BF(CLK, GEM_CLK_DIV64);
    else
        config = GEM_BF(CLK, GEM_CLK_DIV96);

    return config;
}
static u32 macb_mdc_clk_div(struct macb *bp)
{
    u32 config;
    unsigned long pclk_hz;

    if (macb_is_gem(bp))
        return gem_mdc_clk_div(bp);

    pclk_hz = clk_get_rate(bp->pclk);
    if (pclk_hz <= 20000000)
        config = MACB_BF(CLK, MACB_CLK_DIV8);
    else if (pclk_hz <= 40000000)
        config = MACB_BF(CLK, MACB_CLK_DIV16);
    else if (pclk_hz <= 80000000)
        config = MACB_BF(CLK, MACB_CLK_DIV32);
    else
        config = MACB_BF(CLK, MACB_CLK_DIV64);

    return config;
}
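/*
 * Example of what these tables achieve: with pclk at 133 MHz the MACB
 * table above selects MACB_CLK_DIV64, giving an MDC of roughly
 * 133 MHz / 64 ~= 2.08 MHz, which stays under the nominal 2.5 MHz
 * ceiling IEEE 802.3 sets for the MII management interface.
 */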
/*
 * Get the DMA bus width field of the network configuration register that we
 * should program. We find the width from decoding the design configuration
 * register to find the maximum supported data bus width.
 */
static u32 macb_dbw(struct macb *bp)
{
    if (!macb_is_gem(bp))
        return 0;

    switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
    case 4:
        return GEM_BF(DBW, GEM_DBW128);
    case 2:
        return GEM_BF(DBW, GEM_DBW64);
    default:
        return GEM_BF(DBW, GEM_DBW32);
    }
}
/*
 * Configure the receive DMA engine
 * - use the correct receive buffer size
 * - set the possibility to use INCR16 bursts
 *   (if not supported by FIFO, it will fall back to default)
 * - set both rx/tx packet buffers to full memory size
 * These are configurable parameters for GEM.
 */
static void macb_configure_dma(struct macb *bp)
{
    u32 dmacfg;

    if (macb_is_gem(bp)) {
        dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
        dmacfg |= GEM_BF(RXBS, RX_BUFFER_SIZE / 64);
        dmacfg |= GEM_BF(FBLDO, 16);
        dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
        dmacfg &= ~GEM_BIT(ENDIA);
        gem_writel(bp, DMACFG, dmacfg);
    }
}
/*
 * Configure peripheral capabilities according to the integration options used.
 */
static void macb_configure_caps(struct macb *bp)
{
    if (macb_is_gem(bp)) {
        if (GEM_BF(IRQCOR, gem_readl(bp, DCFG1)) == 0)
            bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
    }
}
static void macb_init_hw(struct macb *bp)
{
    u32 config;

    macb_reset_hw(bp);
    macb_set_hwaddr(bp);

    config = macb_mdc_clk_div(bp);
    config |= MACB_BF(RBOF, NET_IP_ALIGN);	/* Make eth data aligned */
    config |= MACB_BIT(PAE);		/* PAuse Enable */
    config |= MACB_BIT(DRFCS);		/* Discard Rx FCS */
    config |= MACB_BIT(BIG);		/* Receive oversized frames */
    if (bp->dev->flags & IFF_PROMISC)
        config |= MACB_BIT(CAF);	/* Copy All Frames */
    if (!(bp->dev->flags & IFF_BROADCAST))
        config |= MACB_BIT(NBC);	/* No BroadCast */
    config |= macb_dbw(bp);
    macb_writel(bp, NCFGR, config);
    bp->speed = SPEED_10;
    bp->duplex = DUPLEX_HALF;

    macb_configure_dma(bp);
    macb_configure_caps(bp);

    /* Initialize TX and RX buffers */
    macb_writel(bp, RBQP, bp->rx_ring_dma);
    macb_writel(bp, TBQP, bp->tx_ring_dma);

    /* Enable TX and RX */
    macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));

    /* Enable interrupts */
    macb_writel(bp, IER, (MACB_RX_INT_FLAGS
                          | MACB_TX_INT_FLAGS
                          | MACB_BIT(HRESP)));
}
/*
 * The hash address register is 64 bits long and takes up two
 * locations in the memory map. The least significant bits are stored
 * in EMAC_HSL and the most significant bits in EMAC_HSH.
 *
 * The unicast hash enable and the multicast hash enable bits in the
 * network configuration register enable the reception of hash matched
 * frames. The destination address is reduced to a 6 bit index into
 * the 64 bit hash register using the following hash function. The
 * hash function is an exclusive or of every sixth bit of the
 * destination address.
 *
 * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
 * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
 * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
 * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
 * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
 *
 * da[0] represents the least significant bit of the first byte
 * received, that is, the multicast/unicast indicator, and da[47]
 * represents the most significant bit of the last byte received. If
 * the hash index, hi[n], points to a bit that is set in the hash
 * register then the frame will be matched according to whether the
 * frame is multicast or unicast. A multicast match will be signalled
 * if the multicast hash enable bit is set, da[0] is 1 and the hash
 * index points to a bit set in the hash register. A unicast match
 * will be signalled if the unicast hash enable bit is set, da[0] is 0
 * and the hash index points to a bit set in the hash register. To
 * receive all multicast frames, the hash register should be set with
 * all ones and the multicast hash enable bit should be set in the
 * network configuration register.
 */

static inline int hash_bit_value(int bitnr, __u8 *addr)
{
    if (addr[bitnr / 8] & (1 << (bitnr % 8)))
        return 1;
    return 0;
}

/*
 * Return the hash index value for the specified address.
 */
static int hash_get_index(__u8 *addr)
{
    int i, j, bitval;
    int hash_index = 0;

    for (j = 0; j < 6; j++) {
        for (i = 0, bitval = 0; i < 8; i++)
            bitval ^= hash_bit_value(i*6 + j, addr);

        hash_index |= (bitval << j);
    }

    return hash_index;
}
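/*
 * Worked example of the hash function above: for the broadcast address
 * ff:ff:ff:ff:ff:ff every hi[n] is the XOR of eight set bits, which is
 * 0, so broadcast frames hash to index 0. Any two addresses whose bits
 * differ only in ways that cancel out six positions apart land in the
 * same bucket, which is why hash matching is a coarse filter rather
 * than an exact match.
 */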
/*
 * Add multicast addresses to the internal multicast-hash table.
 */
static void macb_sethashtable(struct net_device *dev)
{
    struct netdev_hw_addr *ha;
    unsigned long mc_filter[2];
    unsigned int bitnr;
    struct macb *bp = netdev_priv(dev);

    mc_filter[0] = mc_filter[1] = 0;

    netdev_for_each_mc_addr(ha, dev) {
        bitnr = hash_get_index(ha->addr);
        mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
    }

    macb_or_gem_writel(bp, HRB, mc_filter[0]);
    macb_or_gem_writel(bp, HRT, mc_filter[1]);
}

/*
 * Enable/Disable promiscuous and multicast modes.
 */
void macb_set_rx_mode(struct net_device *dev)
{
    unsigned long cfg;
    struct macb *bp = netdev_priv(dev);

    cfg = macb_readl(bp, NCFGR);

    if (dev->flags & IFF_PROMISC)
        /* Enable promiscuous mode */
        cfg |= MACB_BIT(CAF);
    else if (dev->flags & (~IFF_PROMISC))
        /* Disable promiscuous mode */
        cfg &= ~MACB_BIT(CAF);

    if (dev->flags & IFF_ALLMULTI) {
        /* Enable all multicast mode */
        macb_or_gem_writel(bp, HRB, -1);
        macb_or_gem_writel(bp, HRT, -1);
        cfg |= MACB_BIT(NCFGR_MTI);
    } else if (!netdev_mc_empty(dev)) {
        /* Enable specific multicasts */
        macb_sethashtable(dev);
        cfg |= MACB_BIT(NCFGR_MTI);
    } else if (dev->flags & (~IFF_ALLMULTI)) {
        /* Disable all multicast mode */
        macb_or_gem_writel(bp, HRB, 0);
        macb_or_gem_writel(bp, HRT, 0);
        cfg &= ~MACB_BIT(NCFGR_MTI);
    }

    macb_writel(bp, NCFGR, cfg);
}
EXPORT_SYMBOL_GPL(macb_set_rx_mode);
static int macb_open(struct net_device *dev)
{
    struct macb *bp = netdev_priv(dev);
    int err;

    netdev_dbg(bp->dev, "open\n");

    /* carrier starts down */
    netif_carrier_off(dev);

    /* if the phy is not yet registered, retry later */
    if (!bp->phy_dev)
        return -EAGAIN;

    err = macb_alloc_consistent(bp);
    if (err) {
        netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
                   err);
        return err;
    }

    napi_enable(&bp->napi);

    macb_init_rings(bp);
    macb_init_hw(bp);

    /* schedule a link state check */
    phy_start(bp->phy_dev);

    netif_start_queue(dev);

    return 0;
}

static int macb_close(struct net_device *dev)
{
    struct macb *bp = netdev_priv(dev);
    unsigned long flags;

    netif_stop_queue(dev);
    napi_disable(&bp->napi);

    if (bp->phy_dev)
        phy_stop(bp->phy_dev);

    spin_lock_irqsave(&bp->lock, flags);
    macb_reset_hw(bp);
    netif_carrier_off(dev);
    spin_unlock_irqrestore(&bp->lock, flags);

    macb_free_consistent(bp);

    return 0;
}
static void gem_update_stats(struct macb *bp)
{
    u32 __iomem *reg = bp->regs + GEM_OTX;
    u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
    u32 *end = &bp->hw_stats.gem.rx_udp_checksum_errors + 1;

    for (; p < end; p++, reg++)
        *p += __raw_readl(reg);
}
static struct net_device_stats *gem_get_stats(struct macb *bp)
{
    struct gem_stats *hwstat = &bp->hw_stats.gem;
    struct net_device_stats *nstat = &bp->stats;

    gem_update_stats(bp);

    nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
                        hwstat->rx_alignment_errors +
                        hwstat->rx_resource_errors +
                        hwstat->rx_overruns +
                        hwstat->rx_oversize_frames +
                        hwstat->rx_jabbers +
                        hwstat->rx_undersized_frames +
                        hwstat->rx_length_field_frame_errors);
    nstat->tx_errors = (hwstat->tx_late_collisions +
                        hwstat->tx_excessive_collisions +
                        hwstat->tx_underrun +
                        hwstat->tx_carrier_sense_errors);
    nstat->multicast = hwstat->rx_multicast_frames;
    nstat->collisions = (hwstat->tx_single_collision_frames +
                         hwstat->tx_multiple_collision_frames +
                         hwstat->tx_excessive_collisions);
    nstat->rx_length_errors = (hwstat->rx_oversize_frames +
                               hwstat->rx_jabbers +
                               hwstat->rx_undersized_frames +
                               hwstat->rx_length_field_frame_errors);
    nstat->rx_over_errors = hwstat->rx_resource_errors;
    nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
    nstat->rx_frame_errors = hwstat->rx_alignment_errors;
    nstat->rx_fifo_errors = hwstat->rx_overruns;
    nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
    nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
    nstat->tx_fifo_errors = hwstat->tx_underrun;

    return nstat;
}
struct net_device_stats *macb_get_stats(struct net_device *dev)
{
    struct macb *bp = netdev_priv(dev);
    struct net_device_stats *nstat = &bp->stats;
    struct macb_stats *hwstat = &bp->hw_stats.macb;

    if (macb_is_gem(bp))
        return gem_get_stats(bp);

    /* read stats from hardware */
    macb_update_stats(bp);

    /* Convert HW stats into netdevice stats */
    nstat->rx_errors = (hwstat->rx_fcs_errors +
                        hwstat->rx_align_errors +
                        hwstat->rx_resource_errors +
                        hwstat->rx_overruns +
                        hwstat->rx_oversize_pkts +
                        hwstat->rx_jabbers +
                        hwstat->rx_undersize_pkts +
                        hwstat->sqe_test_errors +
                        hwstat->rx_length_mismatch);
    nstat->tx_errors = (hwstat->tx_late_cols +
                        hwstat->tx_excessive_cols +
                        hwstat->tx_underruns +
                        hwstat->tx_carrier_errors);
    nstat->collisions = (hwstat->tx_single_cols +
                         hwstat->tx_multiple_cols +
                         hwstat->tx_excessive_cols);
    nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
                               hwstat->rx_jabbers +
                               hwstat->rx_undersize_pkts +
                               hwstat->rx_length_mismatch);
    nstat->rx_over_errors = hwstat->rx_resource_errors +
                            hwstat->rx_overruns;
    nstat->rx_crc_errors = hwstat->rx_fcs_errors;
    nstat->rx_frame_errors = hwstat->rx_align_errors;
    nstat->rx_fifo_errors = hwstat->rx_overruns;
    /* XXX: What does "missed" mean? */
    nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
    nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
    nstat->tx_fifo_errors = hwstat->tx_underruns;
    /* Don't know about heartbeat or window errors... */

    return nstat;
}
EXPORT_SYMBOL_GPL(macb_get_stats);
static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
    struct macb *bp = netdev_priv(dev);
    struct phy_device *phydev = bp->phy_dev;

    if (!phydev)
        return -ENODEV;

    return phy_ethtool_gset(phydev, cmd);
}

static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
    struct macb *bp = netdev_priv(dev);
    struct phy_device *phydev = bp->phy_dev;

    if (!phydev)
        return -ENODEV;

    return phy_ethtool_sset(phydev, cmd);
}

static int macb_get_regs_len(struct net_device *netdev)
{
    return MACB_GREGS_NBR * sizeof(u32);
}
static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
                          void *p)
{
    struct macb *bp = netdev_priv(dev);
    unsigned int tail, head;
    u32 *regs_buff = p;

    regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
                    | MACB_GREGS_VERSION;

    tail = macb_tx_ring_wrap(bp->tx_tail);
    head = macb_tx_ring_wrap(bp->tx_head);

    regs_buff[0]  = macb_readl(bp, NCR);
    regs_buff[1]  = macb_or_gem_readl(bp, NCFGR);
    regs_buff[2]  = macb_readl(bp, NSR);
    regs_buff[3]  = macb_readl(bp, TSR);
    regs_buff[4]  = macb_readl(bp, RBQP);
    regs_buff[5]  = macb_readl(bp, TBQP);
    regs_buff[6]  = macb_readl(bp, RSR);
    regs_buff[7]  = macb_readl(bp, IMR);

    regs_buff[8]  = tail;
    regs_buff[9]  = head;
    regs_buff[10] = macb_tx_dma(bp, tail);
    regs_buff[11] = macb_tx_dma(bp, head);

    if (macb_is_gem(bp)) {
        regs_buff[12] = gem_readl(bp, USRIO);
        regs_buff[13] = gem_readl(bp, DMACFG);
    }
}
const struct ethtool_ops macb_ethtool_ops = {
    .get_settings		= macb_get_settings,
    .set_settings		= macb_set_settings,
    .get_regs_len		= macb_get_regs_len,
    .get_regs		= macb_get_regs,
    .get_link		= ethtool_op_get_link,
    .get_ts_info		= ethtool_op_get_ts_info,
};
EXPORT_SYMBOL_GPL(macb_ethtool_ops);

int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
    struct macb *bp = netdev_priv(dev);
    struct phy_device *phydev = bp->phy_dev;

    if (!netif_running(dev))
        return -EINVAL;

    if (!phydev)
        return -ENODEV;

    return phy_mii_ioctl(phydev, rq, cmd);
}
EXPORT_SYMBOL_GPL(macb_ioctl);
static const struct net_device_ops macb_netdev_ops = {
    .ndo_open		= macb_open,
    .ndo_stop		= macb_close,
    .ndo_start_xmit		= macb_start_xmit,
    .ndo_set_rx_mode	= macb_set_rx_mode,
    .ndo_get_stats		= macb_get_stats,
    .ndo_do_ioctl		= macb_ioctl,
    .ndo_validate_addr	= eth_validate_addr,
    .ndo_change_mtu		= eth_change_mtu,
    .ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
    .ndo_poll_controller	= macb_poll_controller,
#endif
};

#if defined(CONFIG_OF)
static const struct of_device_id macb_dt_ids[] = {
    { .compatible = "cdns,at32ap7000-macb" },
    { .compatible = "cdns,at91sam9260-macb" },
    { .compatible = "cdns,macb" },
    { .compatible = "cdns,pc302-gem" },
    { .compatible = "cdns,gem" },
    { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, macb_dt_ids);
#endif
static int __init macb_probe(struct platform_device *pdev)
{
    struct macb_platform_data *pdata;
    struct resource *regs;
    struct net_device *dev;
    struct macb *bp;
    struct phy_device *phydev;
    u32 config;
    int err = -ENXIO;
    struct pinctrl *pinctrl;
    const char *mac;

    regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (!regs) {
        dev_err(&pdev->dev, "no mmio resource defined\n");
        goto err_out;
    }

    pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
    if (IS_ERR(pinctrl)) {
        err = PTR_ERR(pinctrl);
        if (err == -EPROBE_DEFER)
            goto err_out;

        dev_warn(&pdev->dev, "No pinctrl provided\n");
    }

    err = -ENOMEM;
    dev = alloc_etherdev(sizeof(*bp));
    if (!dev)
        goto err_out;

    SET_NETDEV_DEV(dev, &pdev->dev);

    /* TODO: Actually, we have some interesting features... */

    bp = netdev_priv(dev);
    bp->pdev = pdev;
    bp->dev = dev;

    spin_lock_init(&bp->lock);
    INIT_WORK(&bp->tx_error_task, macb_tx_error_task);

    bp->pclk = clk_get(&pdev->dev, "pclk");
    if (IS_ERR(bp->pclk)) {
        dev_err(&pdev->dev, "failed to get macb_clk\n");
        goto err_out_free_dev;
    }
    clk_prepare_enable(bp->pclk);

    bp->hclk = clk_get(&pdev->dev, "hclk");
    if (IS_ERR(bp->hclk)) {
        dev_err(&pdev->dev, "failed to get hclk\n");
        goto err_out_put_pclk;
    }
    clk_prepare_enable(bp->hclk);

    bp->regs = ioremap(regs->start, resource_size(regs));
    if (!bp->regs) {
        dev_err(&pdev->dev, "failed to map registers, aborting.\n");
        err = -ENOMEM;
        goto err_out_disable_clocks;
    }

    dev->irq = platform_get_irq(pdev, 0);
    err = request_irq(dev->irq, macb_interrupt, 0, dev->name, dev);
    if (err) {
        dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
                dev->irq, err);
        goto err_out_iounmap;
    }

    dev->netdev_ops = &macb_netdev_ops;
    netif_napi_add(dev, &bp->napi, macb_poll, 64);
    dev->ethtool_ops = &macb_ethtool_ops;

    dev->base_addr = regs->start;

    /* Set MII management clock divider */
    config = macb_mdc_clk_div(bp);
    config |= macb_dbw(bp);
    macb_writel(bp, NCFGR, config);

    mac = of_get_mac_address(pdev->dev.of_node);
    if (mac)
        memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
    else
        macb_get_hwaddr(bp);

    err = of_get_phy_mode(pdev->dev.of_node);
    if (err < 0) {
        pdata = pdev->dev.platform_data;
        if (pdata && pdata->is_rmii)
            bp->phy_interface = PHY_INTERFACE_MODE_RMII;
        else
            bp->phy_interface = PHY_INTERFACE_MODE_MII;
    } else {
        bp->phy_interface = err;
    }

    if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
        macb_or_gem_writel(bp, USRIO, GEM_BIT(RGMII));
    else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
#if defined(CONFIG_ARCH_AT91)
        macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) |
                                       MACB_BIT(CLKEN)));
#else
        macb_or_gem_writel(bp, USRIO, 0);
#endif
    else
#if defined(CONFIG_ARCH_AT91)
        macb_or_gem_writel(bp, USRIO, MACB_BIT(CLKEN));
#else
        macb_or_gem_writel(bp, USRIO, MACB_BIT(MII));
#endif

    err = register_netdev(dev);
    if (err) {
        dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
        goto err_out_free_irq;
    }

    err = macb_mii_init(bp);
    if (err)
        goto err_out_unregister_netdev;

    platform_set_drvdata(pdev, dev);

    netif_carrier_off(dev);

    netdev_info(dev, "Cadence %s at 0x%08lx irq %d (%pM)\n",
                macb_is_gem(bp) ? "GEM" : "MACB", dev->base_addr,
                dev->irq, dev->dev_addr);

    phydev = bp->phy_dev;
    netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
                phydev->drv->name, dev_name(&phydev->dev), phydev->irq);

    return 0;

err_out_unregister_netdev:
    unregister_netdev(dev);
err_out_free_irq:
    free_irq(dev->irq, dev);
err_out_iounmap:
    iounmap(bp->regs);
err_out_disable_clocks:
    clk_disable_unprepare(bp->hclk);
    clk_put(bp->hclk);
    clk_disable_unprepare(bp->pclk);
err_out_put_pclk:
    clk_put(bp->pclk);
err_out_free_dev:
    free_netdev(dev);
err_out:
    platform_set_drvdata(pdev, NULL);
    return err;
}
static int __exit macb_remove(struct platform_device *pdev)
{
    struct net_device *dev;
    struct macb *bp;

    dev = platform_get_drvdata(pdev);

    if (dev) {
        bp = netdev_priv(dev);
        if (bp->phy_dev)
            phy_disconnect(bp->phy_dev);
        mdiobus_unregister(bp->mii_bus);
        kfree(bp->mii_bus->irq);
        mdiobus_free(bp->mii_bus);
        unregister_netdev(dev);
        free_irq(dev->irq, dev);
        iounmap(bp->regs);
        clk_disable_unprepare(bp->hclk);
        clk_put(bp->hclk);
        clk_disable_unprepare(bp->pclk);
        clk_put(bp->pclk);
        free_netdev(dev);
        platform_set_drvdata(pdev, NULL);
    }

    return 0;
}
#ifdef CONFIG_PM
static int macb_suspend(struct platform_device *pdev, pm_message_t state)
{
    struct net_device *netdev = platform_get_drvdata(pdev);
    struct macb *bp = netdev_priv(netdev);

    netif_carrier_off(netdev);
    netif_device_detach(netdev);

    clk_disable_unprepare(bp->hclk);
    clk_disable_unprepare(bp->pclk);

    return 0;
}

static int macb_resume(struct platform_device *pdev)
{
    struct net_device *netdev = platform_get_drvdata(pdev);
    struct macb *bp = netdev_priv(netdev);

    clk_prepare_enable(bp->pclk);
    clk_prepare_enable(bp->hclk);

    netif_device_attach(netdev);

    return 0;
}
#else
#define macb_suspend	NULL
#define macb_resume	NULL
#endif
static struct platform_driver macb_driver = {
    .remove		= __exit_p(macb_remove),
    .suspend	= macb_suspend,
    .resume		= macb_resume,
    .driver		= {
        .name		= "macb",
        .owner		= THIS_MODULE,
        .of_match_table	= of_match_ptr(macb_dt_ids),
    },
};

module_platform_driver_probe(macb_driver, macb_probe);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");