/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
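
/* Illustrative sketch of the descriptor-ring walk described above -- not
 * part of this driver, and compiled out. The names example_bd, bd_has_data,
 * pass_up_and_refill and EXAMPLE_WRAP are hypothetical stand-ins; the real
 * logic lives in gfar_clean_rx_ring() and gfar_clean_tx_ring() below.
 */
#if 0
struct example_bd {
	u16 status;	/* EMPTY and WRAP flag bits live here */
	u16 length;
	u32 bufPtr;
};

static int example_clean_ring(struct example_bd *base, struct example_bd *cur,
			      int budget)
{
	int howmany = 0;

	/* Process descriptors until the budget is spent or none hold data */
	while (howmany < budget && bd_has_data(cur)) {
		pass_up_and_refill(cur);	/* skb to stack, arm a new one */
		howmany++;

		/* Advance, wrapping to the base when the WRAP bit is seen */
		if (cur->status & EXAMPLE_WRAP)
			cur = base;
		else
			cur++;
	}

	return howmany;
}
#endif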
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/net_tstamp.h>

#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of_net.h>

#define TX_TIMEOUT	(1*HZ)

const char gfar_driver_version[] = "1.3";
static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			   struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			      int amount_pull, struct napi_struct *napi);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	bdp->lstatus = lstatus;
}
static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status |= TXBD_WRAP;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->cur_rx = rx_queue->rx_bd_base;
		rx_queue->skb_currx = 0;
		rxbdp = rx_queue->rx_bd_base;

		for (j = 0; j < rx_queue->rx_ring_size; j++) {
			struct sk_buff *skb = rx_queue->rx_skbuff[j];

			if (skb) {
				gfar_init_rxbdp(rx_queue, rxbdp,
						rxbdp->bufPtr);
			} else {
				skb = gfar_new_skb(ndev);
				if (!skb) {
					netdev_err(ndev, "Can't allocate RX buffers\n");
					return -ENOMEM;
				}
				rx_queue->rx_skbuff[j] = skb;

				gfar_new_rxbdp(rx_queue, rxbdp, skb);
			}

			rxbdp++;
		}
	}

	return 0;
}
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j, k;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->ofdev->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
				   sizeof(struct txbd8) * priv->total_tx_ring_size +
				   sizeof(struct rxbd8) * priv->total_rx_ring_size,
				   &addr, GFP_KERNEL);
	if (!vaddr) {
		netif_err(priv, ifup, ndev,
			  "Could not allocate buffer descriptors!\n");
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->dev = ndev;
		addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
					      tx_queue->tx_ring_size,
					      GFP_KERNEL);
		if (!tx_queue->tx_skbuff) {
			netif_err(priv, ifup, ndev,
				  "Could not allocate tx_skbuff\n");
			goto cleanup;
		}

		for (k = 0; k < tx_queue->tx_ring_size; k++)
			tx_queue->tx_skbuff[k] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
					      rx_queue->rx_ring_size,
					      GFP_KERNEL);
		if (!rx_queue->rx_skbuff) {
			netif_err(priv, ifup, ndev,
				  "Could not allocate rx_skbuff\n");
			goto cleanup;
		}

		for (j = 0; j < rx_queue->rx_ring_size; j++)
			rx_queue->rx_skbuff[j] = NULL;
	}

	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}
static void gfar_init_mac(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;
	u32 tctrl = 0;
	u32 attrs = 0;

	/* write the tx/rx base registers */
	gfar_init_tx_rx_base(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN;
		/* Program the RIR0 reg with the required distribution */
		gfar_write(&regs->rir0, DEFAULT_RIR0);
	}

	/* Restore PROMISC mode */
	if (ndev->flags & IFF_PROMISC)
		rctrl |= RCTRL_PROM;

	if (ndev->features & NETIF_F_RXCSUM)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(ndev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Insert receive time stamps into padding alignment bytes */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(8);
		priv->padding = 8;
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	if (ndev->features & NETIF_F_HW_VLAN_RX)
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);

	if (ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	if (priv->prio_sched_en)
		tctrl |= TCTRL_TXSCHED_PRIO;
	else {
		tctrl |= TCTRL_TXSCHED_WRRS;
		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
	}

	gfar_write(&regs->tctrl, tctrl);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables
	 */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
}
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}
static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_features = gfar_set_features,
	.ndo_set_rx_mode = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};
void lock_rx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_lock(&priv->rx_queue[i]->rxlock);
}

void lock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_rx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_unlock(&priv->rx_queue[i]->rxlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}
static bool gfar_is_vlan_on(struct gfar_private *priv)
{
	return (priv->ndev->features & NETIF_F_HW_VLAN_RX) ||
	       (priv->ndev->features & NETIF_F_HW_VLAN_TX);
}

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
	return gfar_is_vlan_on(priv) ||
	       (priv->ndev->features & NETIF_F_RXCSUM) ||
	       (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
}
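
/* The FCB here is the 8-byte frame control block (GMAC_FCB_LEN) that the
 * controller prepends to incoming frames when any of the features above is
 * active; gfar_process_frame() pulls it off (amount_pull) before the frame
 * is handed to the stack.
 */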
static void free_tx_pointers(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void free_rx_pointers(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void free_gfar_dev(struct gfar_private *priv)
{
	int i, j;

	for (i = 0; i < priv->num_grps; i++)
		for (j = 0; j < GFAR_NUM_IRQS; j++) {
			kfree(priv->gfargrp[i].irqinfo[j]);
			priv->gfargrp[i].irqinfo[j] = NULL;
		}

	free_netdev(priv->ndev);
}

static void disable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++)
		napi_disable(&priv->gfargrp[i].napi);
}

static void enable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++)
		napi_enable(&priv->gfargrp[i].napi);
}
static int gfar_parse_group(struct device_node *np,
			    struct gfar_private *priv, const char *model)
{
	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
	u32 *queue_mask;
	int i;

	if (priv->mode == MQ_MG_MODE) {
		for (i = 0; i < GFAR_NUM_IRQS; i++) {
			grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
						  GFP_KERNEL);
			if (!grp->irqinfo[i])
				return -ENOMEM;
		}
	} else {
		grp->irqinfo[GFAR_TX] = kzalloc(sizeof(struct gfar_irqinfo),
						GFP_KERNEL);
		if (!grp->irqinfo[GFAR_TX])
			return -ENOMEM;
		grp->irqinfo[GFAR_RX] = grp->irqinfo[GFAR_ER] = NULL;
	}

	grp->regs = of_iomap(np, 0);
	if (!grp->regs)
		return -ENOMEM;

	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
		if (gfar_irq(grp, TX)->irq == NO_IRQ ||
		    gfar_irq(grp, RX)->irq == NO_IRQ ||
		    gfar_irq(grp, ER)->irq == NO_IRQ)
			return -EINVAL;
	}

	grp->grp_id = priv->num_grps;
	grp->priv = priv;
	spin_lock_init(&grp->grplock);
	if (priv->mode == MQ_MG_MODE) {
		queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
		grp->rx_bit_map = queue_mask ?
			*queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
		queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
		grp->tx_bit_map = queue_mask ?
			*queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
	} else {
		grp->rx_bit_map = 0xFF;
		grp->tx_bit_map = 0xFF;
	}

	priv->num_grps++;

	return 0;
}
static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;
	unsigned int num_tx_qs, num_rx_qs;
	u32 *tx_queues, *rx_queues;

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	/* parse the num of tx and rx queues */
	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
	num_tx_qs = tx_queues ? *tx_queues : 1;

	if (num_tx_qs > MAX_TX_QS) {
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
	num_rx_qs = rx_queues ? *rx_queues : 1;

	if (num_rx_qs > MAX_RX_QS) {
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->node = ofdev->dev.of_node;
	priv->ndev = dev;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;
	priv->num_grps = 0x0;

	/* Init Rx queue filer rule set linked list */
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);

	model = of_get_property(np, "model", NULL);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (of_device_is_compatible(np, "fsl,etsec2")) {
		priv->mode = MQ_MG_MODE;
		for_each_child_of_node(np, child) {
			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else {
		priv->mode = SQ_SG_MODE;
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	for (i = 0; i < priv->num_tx_queues; i++)
		priv->tx_queue[i] = NULL;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->rx_queue[i] = NULL;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i]) {
			err = -ENOMEM;
			goto tx_alloc_failed;
		}
		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = dev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i]) {
			err = -ENOMEM;
			goto rx_alloc_failed;
		}
		priv->rx_queue[i]->rx_skbuff = NULL;
		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->dev = dev;
		spin_lock_init(&(priv->rx_queue[i]->rxlock));
	}

	stash = of_get_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	stash_len = of_get_property(np, "rx-stash-len", NULL);

	if (stash_len)
		priv->rx_stash_size = *stash_len;

	stash_idx = of_get_property(np, "rx-stash-idx", NULL);

	if (stash_idx)
		priv->rx_stash_index = *stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);

	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
				     FSL_GIANFAR_DEV_HAS_COALESCE |
				     FSL_GIANFAR_DEV_HAS_RMON |
				     FSL_GIANFAR_DEV_HAS_MULTI_INTR;

	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
				     FSL_GIANFAR_DEV_HAS_COALESCE |
				     FSL_GIANFAR_DEV_HAS_RMON |
				     FSL_GIANFAR_DEV_HAS_MULTI_INTR |
				     FSL_GIANFAR_DEV_HAS_PADDING |
				     FSL_GIANFAR_DEV_HAS_CSUM |
				     FSL_GIANFAR_DEV_HAS_VLAN |
				     FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
				     FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
				     FSL_GIANFAR_DEV_HAS_TIMER;

	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id. The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* Find the TBI PHY. If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

rx_alloc_failed:
	free_rx_pointers(priv);
tx_alloc_failed:
	free_tx_pointers(priv);
err_grp_init:
	unmap_group_regs(priv);
	return err;
}
static int gfar_hwtstamp_ioctl(struct net_device *netdev,
			       struct ifreq *ifr, int cmd)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (priv->hwts_rx_en) {
			stop_gfar(netdev);
			priv->hwts_rx_en = 0;
			startup_gfar(netdev);
		}
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		if (!priv->hwts_rx_en) {
			stop_gfar(netdev);
			priv->hwts_rx_en = 1;
			startup_gfar(netdev);
		}

		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
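
/* For reference, a minimal userspace sketch of driving the ioctl above
 * (standard SIOCSHWTSTAMP usage; "eth0" and sock_fd are placeholders):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * On success, cfg is written back with what was actually enabled; any
 * rx_filter other than NONE is reported back as HWTSTAMP_FILTER_ALL here.
 */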
/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_ioctl(dev, rq, cmd);

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, rq, cmd);
}
static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
	unsigned int new_bit_map = 0x0;
	int mask = 0x1 << (max_qs - 1), i;

	for (i = 0; i < max_qs; i++) {
		if (bit_map & mask)
			new_bit_map = new_bit_map + (1 << i);
		mask = mask >> 0x1;
	}
	return new_bit_map;
}
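
/* e.g. with max_qs = 8, a bit_map of 0x80 (MSB = q0) reverses to 0x01, so
 * for_each_set_bit(), which scans from bit 0 upwards, visits q0 first.
 */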
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
				   u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}
static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		priv->ftp_rqfcr[i] = rqfcr;
		priv->ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}
static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_76;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_A002;

	/* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
	    (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
		priv->errata |= GFAR_ERRATA_12;

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}
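
/* Worked example of the decoding above: SVR 0x80b00020 gives
 * mod = (0x80b0 & 0xfff6) = 0x80b0 and rev = 0x0020, i.e. an MPC8313 at
 * Rev 2.0, which (with a matching PVR) picks up GFAR_ERRATA_74, _76 and
 * _A002, but not GFAR_ERRATA_12.
 */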
/* Set up the ethernet device structure, private data,
 * and anything else we need before we start
 */
static int gfar_probe(struct platform_device *ofdev)
{
	u32 tempval;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct gfar __iomem *regs = NULL;
	int err = 0, i, grp_idx = 0;
	u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
	u32 isrg = 0;
	u32 __iomem *baddr;

	err = gfar_of_init(ofdev, &dev);

	if (err)
		return err;

	priv = netdev_priv(dev);
	priv->ndev = dev;
	priv->ofdev = ofdev;
	priv->node = ofdev->dev.of_node;
	SET_NETDEV_DEV(dev, &ofdev->dev);

	spin_lock_init(&priv->bflock);
	INIT_WORK(&priv->reset_task, gfar_reset_task);

	dev_set_drvdata(&ofdev->dev, priv);
	regs = priv->gfargrp[0].regs;

	gfar_detect_errata(priv);

	/* Stop the DMA engine now, in case it was running before
	 * (The firmware could have used it, and left it running).
	 */
	gfar_halt(dev);

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(2);

	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize MACCFG2. */
	tempval = MACCFG2_INIT_SETTINGS;
	if (gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
	gfar_write(&regs->maccfg2, tempval);

	/* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) regs;

	SET_NETDEV_DEV(dev, &ofdev->dev);

	/* Fill in the dev structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->mtu = 1500;
	dev->netdev_ops = &gfar_netdev_ops;
	dev->ethtool_ops = &gfar_ethtool_ops;

	/* Register for NAPI; we are registering NAPI for each group */
	for (i = 0; i < priv->num_grps; i++)
		netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
			       GFAR_DEV_WEIGHT);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
				   NETIF_F_RXCSUM;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
				 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
		dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		dev->features |= NETIF_F_HW_VLAN_RX;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;
	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
		priv->padding = DEFAULT_PADDING;
	else
		priv->padding = 0;

	if (dev->features & NETIF_F_IP_CSUM ||
	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		dev->needed_headroom = GMAC_FCB_LEN;

	/* Program the isrg regs only if number of grps > 1 */
	if (priv->num_grps > 1) {
		baddr = &regs->isrg0;
		for (i = 0; i < priv->num_grps; i++) {
			isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
			isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
			gfar_write(baddr, isrg);
			baddr++;
			isrg = 0x0;
		}
	}

	/* Need to reverse the bit maps as bit_map's MSB is q0
	 * but, for_each_set_bit parses from right to left, which
	 * basically reverses the queue numbers
	 */
	for (i = 0; i < priv->num_grps; i++) {
		priv->gfargrp[i].tx_bit_map =
			reverse_bitmap(priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
		priv->gfargrp[i].rx_bit_map =
			reverse_bitmap(priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
	}

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups
	 */
	for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
		priv->gfargrp[grp_idx].num_rx_queues = 0x0;

		for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
				 priv->num_rx_queues) {
			priv->gfargrp[grp_idx].num_rx_queues++;
			priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
			rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
			rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		}
		priv->gfargrp[grp_idx].num_tx_queues = 0x0;

		for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
				 priv->num_tx_queues) {
			priv->gfargrp[grp_idx].num_tx_queues++;
			priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
			tstat = tstat | (TSTAT_CLEAR_THALT >> i);
			tqueue = tqueue | (TQUEUE_EN0 >> i);
		}
		priv->gfargrp[grp_idx].rstat = rstat;
		priv->gfargrp[grp_idx].tstat = tstat;
		rstat = 0;
		tstat = 0;
	}

	gfar_write(&regs->rqueue, rqueue);
	gfar_write(&regs->tqueue, tqueue);

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

	/* Initializing some of the rx/tx queue level parameters */
	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
		priv->tx_queue[i]->txic = DEFAULT_TXIC;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
	}

	/* always enable rx filer */
	priv->rx_filer_enable = 1;
	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
	/* use priority h/w tx queue scheduling for single queue devices */
	if (priv->num_tx_queues == 1)
		priv->prio_sched_en = 1;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);

	if (err) {
		pr_err("%s: Cannot register net device, aborting\n", dev->name);
		goto register_fail;
	}

	device_init_wakeup(&dev->dev,
			   priv->device_flags &
			   FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	/* fill out IRQ number and name fields */
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar_priv_grp *grp = &priv->gfargrp[i];
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
			sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_tx");
			sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_rx");
			sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_er");
		} else
			strcpy(gfar_irq(grp, TX)->name, dev->name);
	}

	/* Initialize the filer table */
	gfar_init_filer_table(priv);

	/* Create all the sysfs files */
	gfar_init_sysfs(dev);

	/* Print out the device info */
	netdev_info(dev, "mac: %pM\n", dev->dev_addr);

	/* Even more device info helps when determining which kernel
	 * provided which set of benchmarks.
	 */
	netdev_info(dev, "Running with NAPI enabled\n");
	for (i = 0; i < priv->num_rx_queues; i++)
		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
			    i, priv->rx_queue[i]->rx_ring_size);
	for (i = 0; i < priv->num_tx_queues; i++)
		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
			    i, priv->tx_queue[i]->tx_ring_size);

	return 0;

register_fail:
	unmap_group_regs(priv);
	free_tx_pointers(priv);
	free_rx_pointers(priv);
	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);
	free_gfar_dev(priv);
	return err;
}
static int gfar_remove(struct platform_device *ofdev)
{
	struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);

	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);

	dev_set_drvdata(&ofdev->dev, NULL);

	unregister_netdev(priv->ndev);
	unmap_group_regs(priv);
	free_gfar_dev(priv);

	return 0;
}
#ifdef CONFIG_PM

static int gfar_suspend(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;

	int magic_packet = priv->wol_en &&
			   (priv->device_flags &
			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	netif_device_detach(ndev);

	if (netif_running(ndev)) {

		local_irq_save(flags);
		lock_tx_qs(priv);
		lock_rx_qs(priv);

		gfar_halt_nodisable(ndev);

		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
		tempval = gfar_read(&regs->maccfg1);

		tempval &= ~MACCFG1_TX_EN;

		if (!magic_packet)
			tempval &= ~MACCFG1_RX_EN;

		gfar_write(&regs->maccfg1, tempval);

		unlock_rx_qs(priv);
		unlock_tx_qs(priv);
		local_irq_restore(flags);

		disable_napi(priv);

		if (magic_packet) {
			/* Enable interrupt on Magic Packet */
			gfar_write(&regs->imask, IMASK_MAG);

			/* Enable Magic Packet mode */
			tempval = gfar_read(&regs->maccfg2);
			tempval |= MACCFG2_MPEN;
			gfar_write(&regs->maccfg2, tempval);
		} else {
			phy_stop(priv->phydev);
		}
	}

	return 0;
}
static int gfar_resume(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;
	int magic_packet = priv->wol_en &&
			   (priv->device_flags &
			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);
		return 0;
	}

	if (!magic_packet && priv->phydev)
		phy_start(priv->phydev);

	/* Disable Magic Packet mode, in case something
	 * else woke us up.
	 */
	local_irq_save(flags);
	lock_tx_qs(priv);
	lock_rx_qs(priv);

	tempval = gfar_read(&regs->maccfg2);
	tempval &= ~MACCFG2_MPEN;
	gfar_write(&regs->maccfg2, tempval);

	gfar_start(ndev);

	unlock_rx_qs(priv);
	unlock_tx_qs(priv);
	local_irq_restore(flags);

	netif_device_attach(ndev);

	enable_napi(priv);

	return 0;
}
static int gfar_restore(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);

		return 0;
	}

	if (gfar_init_bds(ndev)) {
		free_skb_resources(priv);
		return -ENOMEM;
	}

	init_registers(ndev);
	gfar_set_mac_address(ndev);
	gfar_init_mac(ndev);
	gfar_start(ndev);

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	if (priv->phydev)
		phy_start(priv->phydev);

	netif_device_attach(ndev);
	enable_napi(priv);

	return 0;
}

static struct dev_pm_ops gfar_pm_ops = {
	.suspend = gfar_suspend,
	.resume = gfar_resume,
	.freeze = gfar_suspend,
	.thaw = gfar_resume,
	.restore = gfar_restore,
};

#define GFAR_PM_OPS (&gfar_pm_ops)

#else

#define GFAR_PM_OPS NULL

#endif
/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 ecntrl;

	ecntrl = gfar_read(&regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
			return PHY_INTERFACE_MODE_RMII;
		} else {
			phy_interface_t interface = priv->interface;

			/* This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}
/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	uint gigabit_support =
		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
		SUPPORTED_1000baseT_Full : 0;
	phy_interface_t interface;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	interface = gfar_get_interface(dev);

	priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
				      interface);
	if (!priv->phydev)
		priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
							 interface);
	if (!priv->phydev) {
		dev_err(&dev->dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	/* Remove any features not supported by the controller */
	priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
	priv->phydev->advertising = priv->phydev->supported;

	return 0;
}
/* Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip. We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register. We assume
 * that the TBIPA register is valid. Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *tbiphy;

	if (!priv->tbi_node) {
		dev_warn(&dev->dev, "error: SGMII mode requires that the "
			 "device tree specify a tbi-handle\n");
		return;
	}

	tbiphy = of_phy_find_device(priv->tbi_node);
	if (!tbiphy) {
		dev_err(&dev->dev, "error: Could not get TBI device\n");
		return;
	}

	/* If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
	 * everything for us? Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
		return;

	/* Single clk mode, mii mode off (for serdes communication) */
	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);

	phy_write(tbiphy, MII_ADVERTISE,
		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
		  ADVERTISE_1000XPSE_ASYM);

	phy_write(tbiphy, MII_BMCR,
		  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
		  BMCR_SPEED1000);
}
static void init_registers(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	/* Init hash registers to zero */
	gfar_write(&regs->igaddr0, 0);
	gfar_write(&regs->igaddr1, 0);
	gfar_write(&regs->igaddr2, 0);
	gfar_write(&regs->igaddr3, 0);
	gfar_write(&regs->igaddr4, 0);
	gfar_write(&regs->igaddr5, 0);
	gfar_write(&regs->igaddr6, 0);
	gfar_write(&regs->igaddr7, 0);

	gfar_write(&regs->gaddr0, 0);
	gfar_write(&regs->gaddr1, 0);
	gfar_write(&regs->gaddr2, 0);
	gfar_write(&regs->gaddr3, 0);
	gfar_write(&regs->gaddr4, 0);
	gfar_write(&regs->gaddr5, 0);
	gfar_write(&regs->gaddr6, 0);
	gfar_write(&regs->gaddr7, 0);

	/* Zero out the rmon mib registers if it has them */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&regs->rmon.cam1, 0xffffffff);
		gfar_write(&regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize the max receive buffer length */
	gfar_write(&regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
}
static int __gfar_is_rx_idle(struct gfar_private *priv)
{
	u32 res;

	/* Normally, TSEC should not hang on GRS commands, so we should
	 * actually wait for IEVENT_GRSC flag.
	 */
	if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002)))
		return 0;

	/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
	 * and the Rx can be safely reset.
	 */
	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
	res &= 0x7f807f80;
	if ((res & 0xffff) == (res >> 16))
		return 1;

	return 0;
}
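
/* e.g. a masked read of res = 0x45804580 has equal low and high halves
 * (0x4580 == 0x4580), so the Rx side is reported idle; res = 0x45804500
 * would not be.
 */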
/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	u32 tempval;
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Mask all interrupts */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);

		/* Clear all interrupts */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&regs->dmactrl);
	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
	    (DMACTRL_GRS | DMACTRL_GTS)) {
		int ret;

		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
		gfar_write(&regs->dmactrl, tempval);

		do {
			ret = spin_event_timeout(((gfar_read(&regs->ievent) &
				 (IEVENT_GRSC | IEVENT_GTSC)) ==
				 (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
			if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
				ret = __gfar_is_rx_idle(priv);
		} while (!ret);
	}
}

/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	gfar_halt_nodisable(dev);

	/* Disable Rx and Tx */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}
static void free_grp_irqs(struct gfar_priv_grp *grp)
{
	free_irq(gfar_irq(grp, TX)->irq, grp);
	free_irq(gfar_irq(grp, RX)->irq, grp);
	free_irq(gfar_irq(grp, ER)->irq, grp);
}

void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	int i;

	phy_stop(priv->phydev);

	/* Lock it down */
	local_irq_save(flags);
	lock_tx_qs(priv);
	lock_rx_qs(priv);

	gfar_halt(dev);

	unlock_rx_qs(priv);
	unlock_tx_qs(priv);
	local_irq_restore(flags);

	/* Free the IRQs */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++)
			free_grp_irqs(&priv->gfargrp[i]);
	} else {
		for (i = 0; i < priv->num_grps; i++)
			free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
				 &priv->gfargrp[i]);
	}

	free_skb_resources(priv);
}
static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
{
	struct txbd8 *txbdp;
	struct gfar_private *priv = netdev_priv(tx_queue->dev);
	int i, j;

	txbdp = tx_queue->tx_bd_base;

	for (i = 0; i < tx_queue->tx_ring_size; i++) {
		if (!tx_queue->tx_skbuff[i])
			continue;

		dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
				 txbdp->length, DMA_TO_DEVICE);
		txbdp->lstatus = 0;
		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
		     j++) {
			txbdp++;
			dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
				       txbdp->length, DMA_TO_DEVICE);
		}
		txbdp++;
		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
		tx_queue->tx_skbuff[i] = NULL;
	}
	kfree(tx_queue->tx_skbuff);
	tx_queue->tx_skbuff = NULL;
}

static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
{
	struct rxbd8 *rxbdp;
	struct gfar_private *priv = netdev_priv(rx_queue->dev);
	int i;

	rxbdp = rx_queue->rx_bd_base;

	for (i = 0; i < rx_queue->rx_ring_size; i++) {
		if (rx_queue->rx_skbuff[i]) {
			dma_unmap_single(&priv->ofdev->dev,
					 rxbdp->bufPtr, priv->rx_buffer_size,
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
			rx_queue->rx_skbuff[i] = NULL;
		}
		rxbdp->lstatus = 0;
		rxbdp->bufPtr = 0;
		rxbdp++;
	}
	kfree(rx_queue->rx_skbuff);
	rx_queue->rx_skbuff = NULL;
}
/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff
 */
static void free_skb_resources(struct gfar_private *priv)
{
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	for (i = 0; i < priv->num_tx_queues; i++) {
		struct netdev_queue *txq;

		tx_queue = priv->tx_queue[i];
		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
		if (tx_queue->tx_skbuff)
			free_skb_tx_queue(tx_queue);
		netdev_tx_reset_queue(txq);
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		if (rx_queue->rx_skbuff)
			free_skb_rx_queue(rx_queue);
	}

	dma_free_coherent(&priv->ofdev->dev,
			  sizeof(struct txbd8) * priv->total_tx_ring_size +
			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
			  priv->tx_queue[0]->tx_bd_base,
			  priv->tx_queue[0]->tx_bd_dma_base);
}
void gfar_start(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	int i;

	/* Enable Rx and Tx in MACCFG1 */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear THLT/RHLT, so that the DMA starts polling now */
		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}

	dev->trans_start = jiffies; /* prevent tx timeout */
}
void gfar_configure_coalescing(struct gfar_private *priv,
			       unsigned long tx_mask, unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	/* Backward compatible case -- even if we enable
	 * multiple queues, there's only a single reg to program
	 */
	gfar_write(&regs->txic, 0);
	if (likely(priv->tx_queue[0]->txcoalescing))
		gfar_write(&regs->txic, priv->tx_queue[0]->txic);

	gfar_write(&regs->rxic, 0);
	if (unlikely(priv->rx_queue[0]->rxcoalescing))
		gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);

	if (priv->mode == MQ_MG_MODE) {
		baddr = &regs->txic0;
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->tx_queue[i]->txcoalescing))
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->rx_queue[i]->rxcoalescing))
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
		}
	}
}
static int register_grp_irqs(struct gfar_priv_grp *grp)
{
	struct gfar_private *priv = grp->priv;
	struct net_device *dev = priv->ndev;
	int err;

	/* If the device has multiple interrupts, register for
	 * them. Otherwise, only register for the one
	 */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive
		 */
		err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
				  gfar_irq(grp, ER)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, ER)->irq);

			goto err_irq_fail;
		}

		err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
				  gfar_irq(grp, TX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, TX)->irq);
			goto tx_irq_fail;
		}

		err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
				  gfar_irq(grp, RX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, RX)->irq);
			goto rx_irq_fail;
		}
	} else {
		err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
				  gfar_irq(grp, TX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, TX)->irq);
			goto err_irq_fail;
		}
	}

	return 0;

rx_irq_fail:
	free_irq(gfar_irq(grp, TX)->irq, grp);
tx_irq_fail:
	free_irq(gfar_irq(grp, ER)->irq, grp);
err_irq_fail:
	return err;
}
/* Bring the controller up and running */
int startup_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = NULL;
	int err, i, j;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	err = gfar_alloc_skb_resources(ndev);
	if (err)
		return err;

	gfar_init_mac(ndev);

	for (i = 0; i < priv->num_grps; i++) {
		err = register_grp_irqs(&priv->gfargrp[i]);
		if (err) {
			for (j = 0; j < i; j++)
				free_grp_irqs(&priv->gfargrp[j]);
			goto irq_fail;
		}
	}

	/* Start the controller */
	gfar_start(ndev);

	phy_start(priv->phydev);

	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	return 0;

irq_fail:
	free_skb_resources(priv);
	return err;
}
/* Called when something needs to use the ethernet device
 * Returns 0 for success.
 */
static int gfar_enet_open(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err;

	enable_napi(priv);

	/* Initialize a bunch of registers */
	init_registers(dev);

	gfar_set_mac_address(dev);

	err = init_phy(dev);
	if (err) {
		disable_napi(priv);
		return err;
	}

	err = startup_gfar(dev);
	if (err) {
		disable_napi(priv);
		return err;
	}

	netif_tx_start_all_queues(dev);

	device_set_wakeup_enable(&dev->dev, priv->wol_en);

	return err;
}
static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
				    int fcb_length)
{
	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload. We set it to checksum, using a pseudo-header
	 * we provide
	 */
	u8 flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is
	 * And provide the already calculated phcs
	 */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = udp_hdr(skb)->check;
	} else
		fcb->phcs = tcp_hdr(skb)->check;

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr
	 */
	fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
	fcb->l4os = skb_network_header_len(skb);

	fcb->flags = flags;
}

inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = vlan_tx_tag_get(skb);
}

static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
				      struct txbd8 *base, int ring_size)
{
	struct txbd8 *new_bd = bdp + stride;

	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
}

static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
				      int ring_size)
{
	return skip_txbd(bdp, 1, base, ring_size);
}
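
/* e.g. with ring_size = 8 and bdp at base + 7, next_txbd() yields base
 * again: 7 + 1 >= 8, so one ring_size is subtracted and the walk wraps.
 */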
2067 /* This is called by the kernel when a frame is ready for transmission.
2068 * It is pointed to by the dev->hard_start_xmit function pointer
2070 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2072 struct gfar_private *priv = netdev_priv(dev);
2073 struct gfar_priv_tx_q *tx_queue = NULL;
2074 struct netdev_queue *txq;
2075 struct gfar __iomem *regs = NULL;
2076 struct txfcb *fcb = NULL;
2077 struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
2079 int i, rq = 0, do_tstamp = 0;
2081 unsigned long flags;
2082 unsigned int nr_frags, nr_txbds, length, fcb_length = GMAC_FCB_LEN;
2084 /* TOE=1 frames larger than 2500 bytes may see excess delays
2085 * before start of transmission.
2087 if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
2088 skb->ip_summed == CHECKSUM_PARTIAL &&
2092 ret = skb_checksum_help(skb);
2097 rq = skb->queue_mapping;
2098 tx_queue = priv->tx_queue[rq];
2099 txq = netdev_get_tx_queue(dev, rq);
2100 base = tx_queue->tx_bd_base;
2101 regs = tx_queue->grp->regs;
2103 /* check if time stamp should be generated */
2104 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
2105 priv->hwts_tx_en)) {
2107 fcb_length = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2110 /* make space for additional header when fcb is needed */
2111 if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
2112 vlan_tx_tag_present(skb) ||
2113 unlikely(do_tstamp)) &&
2114 (skb_headroom(skb) < fcb_length)) {
2115 struct sk_buff *skb_new;
2117 skb_new = skb_realloc_headroom(skb, fcb_length);
2119 dev->stats.tx_errors++;
2121 return NETDEV_TX_OK;
2125 skb_set_owner_w(skb_new, skb->sk);
2130 /* total number of fragments in the SKB */
2131 nr_frags = skb_shinfo(skb)->nr_frags;
2133 /* calculate the required number of TxBDs for this skb */
2134 if (unlikely(do_tstamp))
2135 nr_txbds = nr_frags + 2;
2137 nr_txbds = nr_frags + 1;
2139 /* check if there is space to queue this packet */
2140 if (nr_txbds > tx_queue->num_txbdfree) {
2141 /* no space, stop the queue */
2142 netif_tx_stop_queue(txq);
2143 dev->stats.tx_fifo_errors++;
2144 return NETDEV_TX_BUSY;
2147 /* Update transmit stats */
2148 tx_queue->stats.tx_bytes += skb->len;
2149 tx_queue->stats.tx_packets++;
2151 txbdp = txbdp_start = tx_queue->cur_tx;
2152 lstatus = txbdp->lstatus;
2154 /* Time stamp insertion requires one additional TxBD */
2155 if (unlikely(do_tstamp))
2156 txbdp_tstamp = txbdp = next_txbd(txbdp, base,
2157 tx_queue->tx_ring_size);
2159 if (nr_frags == 0) {
2160 if (unlikely(do_tstamp))
2161 txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
2164 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2166 /* Place the fragment addresses and lengths into the TxBDs */
2167 for (i = 0; i < nr_frags; i++) {
2168 /* Point at the next BD, wrapping as needed */
2169 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2171 length = skb_shinfo(skb)->frags[i].size;
2173 lstatus = txbdp->lstatus | length |
2174 BD_LFLAG(TXBD_READY);
2176 /* Handle the last BD specially */
2177 if (i == nr_frags - 1)
2178 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2180 bufaddr = skb_frag_dma_map(&priv->ofdev->dev,
2181 &skb_shinfo(skb)->frags[i],
2186 /* set the TxBD length and buffer pointer */
2187 txbdp->bufPtr = bufaddr;
2188 txbdp->lstatus = lstatus;
2191 lstatus = txbdp_start->lstatus;
2194 /* Add TxPAL between FCB and frame if required */
2195 if (unlikely(do_tstamp)) {
2196 skb_push(skb, GMAC_TXPAL_LEN);
2197 memset(skb->data, 0, GMAC_TXPAL_LEN);
2200 /* Set up checksumming */
2201 if (CHECKSUM_PARTIAL == skb->ip_summed) {
2202 fcb = gfar_add_fcb(skb);
2203 /* as specified by errata */
2204 if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12) &&
2205 ((unsigned long)fcb % 0x20) > 0x18)) {
2206 __skb_pull(skb, GMAC_FCB_LEN);
2207 skb_checksum_help(skb);
2209 lstatus |= BD_LFLAG(TXBD_TOE);
2210 gfar_tx_checksum(skb, fcb, fcb_length);
2214 if (vlan_tx_tag_present(skb)) {
2215 if (unlikely(NULL == fcb)) {
2216 fcb = gfar_add_fcb(skb);
2217 lstatus |= BD_LFLAG(TXBD_TOE);
2220 gfar_tx_vlan(skb, fcb);
2223 /* Setup tx hardware time stamping if requested */
2224 if (unlikely(do_tstamp)) {
2225 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2227 fcb = gfar_add_fcb(skb);
2229 lstatus |= BD_LFLAG(TXBD_TOE);
2232 txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
2233 skb_headlen(skb), DMA_TO_DEVICE);
2235 /* If time stamping is requested one additional TxBD must be set up. The
2236 * first TxBD points to the FCB and must have a data length of
2237 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
2238 * the full frame length.
2240 if (unlikely(do_tstamp)) {
2241 txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_length;
2242 txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
2243 (skb_headlen(skb) - fcb_length);
2244 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
2246 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* We can work in parallel with gfar_clean_tx_ring(), except
	 * when modifying num_txbdfree. Note that we didn't grab the lock
	 * when we were reading the num_txbdfree and checking for available
	 * space, that's because outside of this function it can only grow,
	 * and once we've got needed space, it cannot suddenly disappear.
	 *
	 * The lock also protects us from gfar_error(), which can modify
	 * regs->tstat and thus retrigger the transfers, which is why we
	 * also must grab the lock before setting the ready bit for the
	 * first BD to be transmitted.
	 */
	spin_lock_irqsave(&tx_queue->txlock, flags);

	/* The powerpc-specific eieio() is used, as wmb() has too strong
	 * semantics (it requires synchronization between cacheable and
	 * uncacheable mappings, which eieio() doesn't provide and which we
	 * don't need), thus requiring a more expensive sync instruction. At
	 * some point, the set of architecture-independent barrier functions
	 * should be expanded to include weaker barriers.
	 */
	eieio();

	txbdp_start->lstatus = lstatus;

	eieio(); /* force lstatus write before tx_skbuff */
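	/* Editor's note (informal ordering sketch): (1) the fragment BDs and
	 * the other fields of the first BD are written; (2) eieio() orders
	 * those stores ahead of (3) the lstatus write that sets TXBD_READY
	 * and hands the descriptor to the controller; (4) the second eieio()
	 * orders that hand-off ahead of (5) publishing the skb in
	 * tx_skbuff[] for the cleanup path.
	 */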
	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;

	/* Update the current skb pointer to the next entry we will use
	 * (wrapping if necessary)
	 */
	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
			      TX_RING_MOD_MASK(tx_queue->tx_ring_size);
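	/* Editor's note (illustrative, assuming TX_RING_MOD_MASK(size)
	 * expands to (size - 1) for a power-of-two ring): a 256-entry ring
	 * yields mask 0xff, so (255 + 1) & 0xff wraps the index back to 0
	 * without a divide.
	 */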
	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);

	/* reduce TxBD free count */
	tx_queue->num_txbdfree -= (nr_txbds);

	/* If the next BD still needs to be cleaned up, then the bds
	 * are full. We need to tell the kernel to stop sending us stuff.
	 */
	if (!tx_queue->num_txbdfree) {
		netif_tx_stop_queue(txq);

		dev->stats.tx_fifo_errors++;
	}

	/* Tell the DMA to go go go */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);

	/* Unlock priv */
	spin_unlock_irqrestore(&tx_queue->txlock, flags);

	return NETDEV_TX_OK;
}

/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	disable_napi(priv);

	cancel_work_sync(&priv->reset_task);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	netif_tx_stop_all_queues(dev);

	return 0;
}

/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}

/* Check if rx parser should be activated */
void gfar_check_rx_parser_mode(struct gfar_private *priv)
{
	struct gfar __iomem *regs;
	u32 tempval;

	regs = priv->gfargrp[0].regs;

	tempval = gfar_read(&regs->rctrl);
	/* If parse is no longer required, then disable parser */
	if (tempval & RCTRL_REQ_PARSER)
		tempval |= RCTRL_PRSDEP_INIT;
	else
		tempval &= ~RCTRL_PRSDEP_INIT;
	gfar_write(&regs->rctrl, tempval);
}

/* Enables and disables VLAN insertion/extraction */
void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	unsigned long flags;
	u32 tempval;

	regs = priv->gfargrp[0].regs;
	local_irq_save(flags);
	lock_rx_qs(priv);

	if (features & NETIF_F_HW_VLAN_TX) {
		/* Enable VLAN tag insertion */
		tempval = gfar_read(&regs->tctrl);
		tempval |= TCTRL_VLINS;
		gfar_write(&regs->tctrl, tempval);
	} else {
		/* Disable VLAN tag insertion */
		tempval = gfar_read(&regs->tctrl);
		tempval &= ~TCTRL_VLINS;
		gfar_write(&regs->tctrl, tempval);
	}

	if (features & NETIF_F_HW_VLAN_RX) {
		/* Enable VLAN tag extraction */
		tempval = gfar_read(&regs->rctrl);
		tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Disable VLAN tag extraction */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~RCTRL_VLEX;
		gfar_write(&regs->rctrl, tempval);

		gfar_check_rx_parser_mode(priv);
	}

	gfar_change_mtu(dev, dev->mtu);

	unlock_rx_qs(priv);
	local_irq_restore(flags);
}

static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	int tempsize, tempval;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int oldsize = priv->rx_buffer_size;
	int frame_size = new_mtu + ETH_HLEN;

	if (gfar_is_vlan_on(priv))
		frame_size += VLAN_HLEN;

	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
		netif_err(priv, drv, dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

	if (gfar_uses_fcb(priv))
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

	tempsize = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
		   INCREMENTAL_BUFFER_SIZE;
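	/* Editor's note (worked example, assuming INCREMENTAL_BUFFER_SIZE is
	 * 512): frame_size = 1522 gives (1522 & ~511) + 512 = 1536. Exact
	 * multiples are bumped a full step (1536 -> 2048), so the resulting
	 * buffer size is always strictly larger than frame_size.
	 */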
	/* Only stop and start the controller if it isn't already
	 * stopped, and we changed something
	 */
	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		stop_gfar(dev);

	priv->rx_buffer_size = tempsize;

	dev->mtu = new_mtu;

	gfar_write(&regs->mrblr, priv->rx_buffer_size);
	gfar_write(&regs->maxfrm, priv->rx_buffer_size);

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length
	 */
	tempval = gfar_read(&regs->maccfg2);

	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
	    gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
	else
		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

	gfar_write(&regs->maccfg2, tempval);

	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		startup_gfar(dev);

	return 0;
}

/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
						 reset_task);
	struct net_device *dev = priv->ndev;

	if (dev->flags & IFF_UP) {
		netif_tx_stop_all_queues(dev);
		stop_gfar(dev);
		startup_gfar(dev);
		netif_tx_start_all_queues(dev);
	}

	netif_tx_schedule_all(dev);
}

static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}

static void gfar_align_skb(struct sk_buff *skb)
{
	/* We need the data buffer to be aligned properly. We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb, RXBUF_ALIGNMENT -
		    (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
}
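/* Editor's note (illustrative, assuming RXBUF_ALIGNMENT == 64): a data
 * pointer ending in 0x...30 gets 0x10 bytes reserved, moving it onto a
 * 64-byte boundary; an already aligned pointer gets a full 64 bytes
 * reserved, which is why gfar_alloc_skb() over-allocates by
 * RXBUF_ALIGNMENT.
 */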
/* Interrupt Handler for Transmit complete */
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
	struct net_device *dev = tx_queue->dev;
	struct netdev_queue *txq;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *bdp, *next = NULL;
	struct txbd8 *lbdp = NULL;
	struct txbd8 *base = tx_queue->tx_bd_base;
	struct sk_buff *skb;
	int skb_dirtytx;
	int tx_ring_size = tx_queue->tx_ring_size;
	int frags = 0, nr_txbds = 0;
	int i;
	int howmany = 0;
	int tqi = tx_queue->qindex;
	unsigned int bytes_sent = 0;
	u32 lstatus;
	size_t buflen;

	rx_queue = priv->rx_queue[tqi];
	txq = netdev_get_tx_queue(dev, tqi);
	bdp = tx_queue->dirty_tx;
	skb_dirtytx = tx_queue->skb_dirtytx;

	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
		unsigned long flags;

		frags = skb_shinfo(skb)->nr_frags;

		/* When time stamping, one additional TxBD must be freed.
		 * Also, we need to dma_unmap_single() the TxPAL.
		 */
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
			nr_txbds = frags + 2;
		else
			nr_txbds = frags + 1;

		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);

		lstatus = lbdp->lstatus;

		/* Only clean completed frames */
		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
		    (lstatus & BD_LENGTH_MASK))
			break;

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			next = next_txbd(bdp, base, tx_ring_size);
			buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN;
		} else
			buflen = bdp->length;

		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
				 buflen, DMA_TO_DEVICE);

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			struct skb_shared_hwtstamps shhwtstamps;
			u64 *ns = (u64 *)(((u32)skb->data + 0x10) & ~0x7);
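			/* Editor's note: the hardware-written timestamp is
			 * taken from the buffer headroom, 0x10 bytes past the
			 * original head (beyond the FCB, in the TxPAL area),
			 * rounded down to what is presumably the 8-byte
			 * alignment of the 64-bit value.
			 */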
			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
			skb_tstamp_tx(skb, &shhwtstamps);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next;
		} else
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);

		bdp = next_txbd(bdp, base, tx_ring_size);

		for (i = 0; i < frags; i++) {
			dma_unmap_page(&priv->ofdev->dev, bdp->bufPtr,
				       bdp->length, DMA_TO_DEVICE);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next_txbd(bdp, base, tx_ring_size);
		}

		bytes_sent += skb->len;

		dev_kfree_skb_any(skb);

		tx_queue->tx_skbuff[skb_dirtytx] = NULL;

		skb_dirtytx = (skb_dirtytx + 1) &
			      TX_RING_MOD_MASK(tx_ring_size);

		howmany++;
		spin_lock_irqsave(&tx_queue->txlock, flags);
		tx_queue->num_txbdfree += nr_txbds;
		spin_unlock_irqrestore(&tx_queue->txlock, flags);
	}

	/* If we freed a buffer, we can restart transmission, if necessary */
	if (netif_tx_queue_stopped(txq) && tx_queue->num_txbdfree)
		netif_wake_subqueue(dev, tqi);

	/* Update dirty indicators */
	tx_queue->skb_dirtytx = skb_dirtytx;
	tx_queue->dirty_tx = bdp;

	netdev_tx_completed_queue(txq, howmany, bytes_sent);

	return howmany;
}

static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
{
	unsigned long flags;

	spin_lock_irqsave(&gfargrp->grplock, flags);
	if (napi_schedule_prep(&gfargrp->napi)) {
		gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
		__napi_schedule(&gfargrp->napi);
	} else {
		/* Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
	}
	spin_unlock_irqrestore(&gfargrp->grplock, flags);
}

/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *grp_id)
{
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
	return IRQ_HANDLED;
}

static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			   struct sk_buff *skb)
{
	struct net_device *dev = rx_queue->dev;
	struct gfar_private *priv = netdev_priv(dev);
	dma_addr_t buf;

	buf = dma_map_single(&priv->ofdev->dev, skb->data,
			     priv->rx_buffer_size, DMA_FROM_DEVICE);
	gfar_init_rxbdp(rx_queue, bdp, buf);
}

static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
	if (!skb)
		return NULL;

	gfar_align_skb(skb);

	return skb;
}

struct sk_buff *gfar_new_skb(struct net_device *dev)
{
	return gfar_alloc_skb(dev);
}

static inline void count_errors(unsigned short status, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;

		estats->rx_trunc++;

		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			estats->rx_large++;
		else
			estats->rx_short++;
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		estats->rx_nonoctet++;
	}
	if (status & RXBD_CRCERR) {
		estats->rx_crcerr++;
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		estats->rx_overrun++;
		stats->rx_crc_errors++;
	}
}

irqreturn_t gfar_receive(int irq, void *grp_id)
{
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
	return IRQ_HANDLED;
}

static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary. Otherwise, the stack is left
	 * to verify the checksum itself (CHECKSUM_NONE).
	 */
	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);
}
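/* Editor's note (illustrative, assuming RXFCB_CSUM_MASK covers both the
 * "checked" bits (RXFCB_CIP, RXFCB_CTU) and the corresponding error bits):
 * comparing the masked flags for equality means only "IP and TCP/UDP both
 * checked, neither in error" yields CHECKSUM_UNNECESSARY.
 */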
/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			      int amount_pull, struct napi_struct *napi)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;

	int ret;

	/* fcb is at the beginning if exists */
	fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb
	 * Remove the padded bytes, if there are any
	 */
	if (amount_pull) {
		skb_record_rx_queue(skb, fcb->rq);
		skb_pull(skb, amount_pull);
	}

	/* Get receive timestamp from the skb */
	if (priv->hwts_rx_en) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		u64 *ns = (u64 *) skb->data;

		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
		shhwtstamps->hwtstamp = ns_to_ktime(*ns);
	}

	if (priv->padding)
		skb_pull(skb, priv->padding);

	if (dev->features & NETIF_F_RXCSUM)
		gfar_rx_checksum(skb, fcb);

	/* Tell the skb what kind of packet this is */
	skb->protocol = eth_type_trans(skb, dev);

	/* There's a need to check for NETIF_F_HW_VLAN_RX here.
	 * Even if vlan rx accel is disabled, on some chips
	 * RXFCB_VLN is pseudo randomly set.
	 */
	if (dev->features & NETIF_F_HW_VLAN_RX &&
	    fcb->flags & RXFCB_VLN)
		__vlan_hwaccel_put_tag(skb, fcb->vlctl);

	/* Send the packet up the stack */
	ret = napi_gro_receive(napi, skb);

	if (GRO_DROP == ret)
		priv->extra_stats.kernel_dropped++;

	return 0;
}

/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached. Returns the number
 * of frames handled.
 */
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
	struct net_device *dev = rx_queue->dev;
	struct rxbd8 *bdp, *base;
	struct sk_buff *skb;
	int pkt_len;
	int amount_pull;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = rx_queue->cur_rx;
	base = rx_queue->rx_bd_base;

	amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);

	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		struct sk_buff *newskb;

		rmb();

		/* Add another skb for the future */
		newskb = gfar_new_skb(dev);

		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];

		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
				 priv->rx_buffer_size, DMA_FROM_DEVICE);

		if (unlikely(!(bdp->status & RXBD_ERR) &&
			     bdp->length > priv->rx_buffer_size))
			bdp->status = RXBD_LARGE;

		/* We drop the frame if we failed to allocate a new buffer */
		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
			     bdp->status & RXBD_ERR)) {
			count_errors(bdp->status, dev);

			if (unlikely(!newskb))
				newskb = skb;
			else if (skb)
				dev_kfree_skb(skb);
		} else {
			/* Increment the number of packets */
			rx_queue->stats.rx_packets++;
			howmany++;

			if (likely(skb)) {
				/* Remove the FCS from the packet length */
				pkt_len = bdp->length - ETH_FCS_LEN;
				skb_put(skb, pkt_len);
				rx_queue->stats.rx_bytes += pkt_len;
				skb_record_rx_queue(skb, rx_queue->qindex);
				gfar_process_frame(dev, skb, amount_pull,
						   &rx_queue->grp->napi);
			} else {
				netif_warn(priv, rx_err, dev, "Missing skb!\n");
				rx_queue->stats.rx_dropped++;
				priv->extra_stats.rx_skbmissing++;
			}
		}

		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;

		/* Setup the new bdp */
		gfar_new_rxbdp(rx_queue, bdp, newskb);

		/* Update to the next pointer */
		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);

		/* update to point at the next skb */
		rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
				      RX_RING_MOD_MASK(rx_queue->rx_ring_size);
	}

	/* Update the current rxbd pointer to be the next one */
	rx_queue->cur_rx = bdp;

	return howmany;
}

static int gfar_poll(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
	int tx_cleaned = 0, i, left_over_budget = budget;
	unsigned long serviced_queues = 0;
	int num_queues = 0;

	num_queues = gfargrp->num_rx_queues;
	budget_per_queue = budget/num_queues;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_RTX_MASK);

	while (num_queues && left_over_budget) {
		budget_per_queue = left_over_budget/num_queues;
		left_over_budget = 0;

		for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
			if (test_bit(i, &serviced_queues))
				continue;
			rx_queue = priv->rx_queue[i];
			tx_queue = priv->tx_queue[rx_queue->qindex];

			tx_cleaned += gfar_clean_tx_ring(tx_queue);
			rx_cleaned_per_queue =
				gfar_clean_rx_ring(rx_queue, budget_per_queue);
			rx_cleaned += rx_cleaned_per_queue;
			if (rx_cleaned_per_queue < budget_per_queue) {
				left_over_budget = left_over_budget +
					(budget_per_queue -
					 rx_cleaned_per_queue);
				set_bit(i, &serviced_queues);
				num_queues--;
			}
		}
	}
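	/* Editor's note (worked example, illustrative): with budget = 64 and
	 * two RX queues in the group, the first pass gives each queue 32
	 * descriptors; a queue that only uses 20 returns 12 to
	 * left_over_budget and is marked serviced, so the next pass hands
	 * the remainder to the still-busy queue.
	 */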
	if (tx_cleaned)
		return budget;

	if (rx_cleaned < budget) {
		napi_complete(napi);

		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		gfar_write(&regs->imask, IMASK_DEFAULT);

		/* If we are coalescing interrupts, update the timer
		 * Otherwise, clear it
		 */
		gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
					  gfargrp->tx_bit_map);
	}

	return rx_cleaned;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i;

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++) {
			disable_irq(priv->gfargrp[i].interruptTransmit);
			disable_irq(priv->gfargrp[i].interruptReceive);
			disable_irq(priv->gfargrp[i].interruptError);
			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
				       &priv->gfargrp[i]);
			enable_irq(priv->gfargrp[i].interruptError);
			enable_irq(priv->gfargrp[i].interruptReceive);
			enable_irq(priv->gfargrp[i].interruptTransmit);
		}
	} else {
		for (i = 0; i < priv->num_grps; i++) {
			disable_irq(priv->gfargrp[i].interruptTransmit);
			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
				       &priv->gfargrp[i]);
			enable_irq(priv->gfargrp[i].interruptTransmit);
		}
	}
}
#endif

/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;

	/* Save ievent for future reference */
	u32 events = gfar_read(&gfargrp->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, grp_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, grp_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, grp_id);

	return IRQ_HANDLED;
}

/* Called every time the controller might need to be made
 * aware of new link state. The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	struct phy_device *phydev = priv->phydev;
	int new_state = 0;

	local_irq_save(flags);
	lock_tx_qs(priv);

	if (phydev->link) {
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode.
		 */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);

				ecntrl &= ~(ECNTRL_R100);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100
				 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				netif_warn(priv, link, dev,
					   "Ack! Speed (%d) is not 10/100/1000!\n",
					   phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);
	unlock_tx_qs(priv);
	local_irq_restore(flags);
}

/* Update the hash table based on the current list of multicast
 * addresses we subscribe to. Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed)
 */
static void gfar_set_multi(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting addresses
		 */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (netdev_mc_empty(dev))
			return;

		/* Parse the list, and set the appropriate bits */
		netdev_for_each_mc_addr(ha, dev) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx, ha->addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, ha->addr);
		}
	}
}

/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception
 */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};

	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
		gfar_set_mac_for_addr(dev, idx, zero_arr);
}

/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table. The table is controlled through 8 32-bit registers:
 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255. This means that the 3 most significant bits of the
 * hash index select which gaddr register to use, and the other 5
 * bits indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the hash bit.
 */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(ETH_ALEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31 - whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}
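/* Editor's note (worked example, illustrative, assuming the default
 * 256-entry table where width == 8): for a CRC result whose top byte is
 * 0xd5 (1101 0101b), whichreg = result >> 29 = 6 and
 * whichbit = (result >> 24) & 0x1f = 0x15, i.e. bit 21 (counting from the
 * MSB, IBM style) of gaddr6 gets set.
 */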
/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int idx;
	char tmpbuf[ETH_ALEN];
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	macptr += num*2;

	/* Now copy it into the mac registers backwards, because
	 * little endian is silly
	 */
	for (idx = 0; idx < ETH_ALEN; idx++)
		tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];

	gfar_write(macptr, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(macptr+1, tempval);
}
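/* Editor's note (worked example, illustrative): for 00:04:9f:01:02:03,
 * tmpbuf becomes 03 02 01 9f 04 00, so on the big-endian targets this
 * driver serves, macstnaddr1 is written as 0x0302019f and the top half of
 * macstnaddr2 as 0x0400. Note the u32 read at tmpbuf + 4 also picks up two
 * bytes beyond the 6-byte buffer, which the MAC presumably ignores.
 */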
/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_private *priv = gfargrp->priv;
	struct net_device *dev = priv->ndev;

	/* Save ievent for future reference */
	u32 events = gfar_read(&regs->ievent);

	/* Clear IEVENT */
	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		netdev_dbg(dev,
			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
			   events, gfar_read(&regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			unsigned long flags;

			netif_dbg(priv, tx_err, dev,
				  "TX FIFO underrun, packet dropped\n");
			dev->stats.tx_dropped++;
			priv->extra_stats.tx_underrun++;

			local_irq_save(flags);
			lock_tx_qs(priv);

			/* Reactivate the Tx Queues */
			gfar_write(&regs->tstat, gfargrp->tstat);

			unlock_tx_qs(priv);
			local_irq_restore(flags);
		}
		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_bsy++;

		gfar_receive(irq, grp_id);

		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
			  gfar_read(&regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_babr++;

		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
	}
	if (events & IEVENT_EBERR) {
		priv->extra_stats.eberr++;
		netif_dbg(priv, rx_err, dev, "bus error\n");
	}
	if (events & IEVENT_RXC)
		netif_dbg(priv, rx_status, dev, "control frame\n");

	if (events & IEVENT_BABT) {
		priv->extra_stats.tx_babt++;
		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
	}
	return IRQ_HANDLED;
}

static struct of_device_id gfar_match[] =
{
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{
		.compatible = "fsl,etsec2",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gfar_match);

/* Structure for a device driver */
static struct platform_driver gfar_driver = {
	.driver = {
		.name = "fsl-gianfar",
		.owner = THIS_MODULE,
		.pm = GFAR_PM_OPS,
		.of_match_table = gfar_match,
	},
	.probe = gfar_probe,
	.remove = gfar_remove,
};

module_platform_driver(gfar_driver);