gianfar: Add Multiple Queue Support
/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx families of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 *  Gianfar:  AKA Lambda Draconis, "Dragon"
 *  RA 11 31 24.2
 *  Dec +69 19 52
 *  V 3.84
 *  B-V +1.62
 *
 *  Theory of operation
 *
 *  The driver is initialized through of_device. Configuration information
 *  is therefore conveyed through an OF-style device tree.
 *
 *  The Gianfar Ethernet Controller uses a ring of buffer
 *  descriptors.  The beginning is indicated by a register
 *  pointing to the physical address of the start of the ring.
 *  The end is determined by a "wrap" bit being set in the
 *  last descriptor of the ring.
 *
 *  When a packet is received, the RXF bit in the
 *  IEVENT register is set, triggering an interrupt when the
 *  corresponding bit in the IMASK register is also set (if
 *  interrupt coalescing is active, then the interrupt may not
 *  happen immediately, but will wait until either a set number
 *  of frames or a set amount of time has passed).  In NAPI, the
 *  interrupt handler will signal that there is work to be done, and
 *  exit.  This method will start at the last known empty
 *  descriptor, and process every subsequent descriptor until there
 *  are none left with data (NAPI will stop after a set number of
 *  packets to give time to other tasks, but will eventually
 *  process all the packets).  The data arrives inside a
 *  pre-allocated skb, and so after the skb is passed up to the
 *  stack, a new skb must be allocated, and the address field in
 *  the buffer descriptor must be updated to indicate this new
 *  skb.
 *
 *  When the kernel requests that a packet be transmitted, the
 *  driver starts where it left off last time, and points the
 *  descriptor at the buffer which was passed in.  The driver
 *  then informs the DMA engine that there are packets ready to
 *  be transmitted.  Once the controller is finished transmitting
 *  the packet, an interrupt may be triggered (under the same
 *  conditions as for reception, but depending on the TXF bit).
 *  The driver then cleans up the buffer.
 */
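
/*
 * For illustration only: a device tree node of roughly the shape that
 * gfar_of_init() below expects.  The property names are the ones this
 * file actually parses; the unit address, "reg" window, interrupt
 * specifiers and queue counts are made-up example values, and optional
 * properties (bd-stash, rx-stash-len, rx-stash-idx, fsl,magic-packet)
 * are omitted.
 *
 *      ethernet@24000 {
 *              model = "eTSEC";
 *              reg = <0x24000 0x1000>;
 *              local-mac-address = [ 00 04 9f 00 00 01 ];
 *              interrupts = <29 2 30 2 34 2>;
 *              fsl,num_tx_queues = <2>;
 *              fsl,num_rx_queues = <2>;
 *              phy-connection-type = "rgmii-id";
 *              phy-handle = <&phy0>;
 *              tbi-handle = <&tbi0>;
 *      };
 */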

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>

#include "gianfar.h"
#include "fsl_pq_mdio.h"

#define TX_TIMEOUT      (1*HZ)
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
                struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct of_device *ofdev,
                const struct of_device_id *match);
static int gfar_remove(struct of_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
                              int amount_pull);
static void gfar_vlan_rx_register(struct net_device *netdev,
                                struct vlan_group *grp);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
                            dma_addr_t buf)
{
        u32 lstatus;

        bdp->bufPtr = buf;

        lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
        if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
                lstatus |= BD_LFLAG(RXBD_WRAP);

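        /* Order the buffer pointer write above before the lstatus write
         * below: once RXBD_EMPTY is set, the controller owns the
         * descriptor and may DMA into whatever address it sees there. */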
        eieio();

        bdp->lstatus = lstatus;
}

static int gfar_init_bds(struct net_device *ndev)
{
        struct gfar_private *priv = netdev_priv(ndev);
        struct gfar_priv_tx_q *tx_queue = NULL;
        struct gfar_priv_rx_q *rx_queue = NULL;
        struct txbd8 *txbdp;
        struct rxbd8 *rxbdp;
        int i, j;

        for (i = 0; i < priv->num_tx_queues; i++) {
                tx_queue = priv->tx_queue[i];
                /* Initialize some variables in our dev structure */
                tx_queue->num_txbdfree = tx_queue->tx_ring_size;
                tx_queue->dirty_tx = tx_queue->tx_bd_base;
                tx_queue->cur_tx = tx_queue->tx_bd_base;
                tx_queue->skb_curtx = 0;
                tx_queue->skb_dirtytx = 0;

                /* Initialize Transmit Descriptor Ring */
                txbdp = tx_queue->tx_bd_base;
                for (j = 0; j < tx_queue->tx_ring_size; j++) {
                        txbdp->lstatus = 0;
                        txbdp->bufPtr = 0;
                        txbdp++;
                }

                /* Set the last descriptor in the ring to indicate wrap */
                txbdp--;
                txbdp->status |= TXBD_WRAP;
        }

        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_queue = priv->rx_queue[i];
                rx_queue->cur_rx = rx_queue->rx_bd_base;
                rx_queue->skb_currx = 0;
                rxbdp = rx_queue->rx_bd_base;

                for (j = 0; j < rx_queue->rx_ring_size; j++) {
                        struct sk_buff *skb = rx_queue->rx_skbuff[j];

                        if (skb) {
                                gfar_init_rxbdp(rx_queue, rxbdp,
                                                rxbdp->bufPtr);
                        } else {
                                skb = gfar_new_skb(ndev);
                                if (!skb) {
                                        pr_err("%s: Can't allocate RX buffers\n",
                                                        ndev->name);
                                        goto err_rxalloc_fail;
                                }
                                rx_queue->rx_skbuff[j] = skb;

                                gfar_new_rxbdp(rx_queue, rxbdp, skb);
                        }

                        rxbdp++;
                }
        }

        return 0;

err_rxalloc_fail:
        free_skb_resources(priv);
        return -ENOMEM;
}

static int gfar_alloc_skb_resources(struct net_device *ndev)
{
        void *vaddr;
        dma_addr_t addr;
        int i, j, k;
        struct gfar_private *priv = netdev_priv(ndev);
        struct device *dev = &priv->ofdev->dev;
        struct gfar_priv_tx_q *tx_queue = NULL;
        struct gfar_priv_rx_q *rx_queue = NULL;

        priv->total_tx_ring_size = 0;
        for (i = 0; i < priv->num_tx_queues; i++)
                priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

        priv->total_rx_ring_size = 0;
        for (i = 0; i < priv->num_rx_queues; i++)
                priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

        /* Allocate memory for the buffer descriptors */
        vaddr = dma_alloc_coherent(dev,
                        sizeof(struct txbd8) * priv->total_tx_ring_size +
                        sizeof(struct rxbd8) * priv->total_rx_ring_size,
                        &addr, GFP_KERNEL);
        if (!vaddr) {
                if (netif_msg_ifup(priv))
                        pr_err("%s: Could not allocate buffer descriptors!\n",
                               ndev->name);
                return -ENOMEM;
        }

        for (i = 0; i < priv->num_tx_queues; i++) {
                tx_queue = priv->tx_queue[i];
                tx_queue->tx_bd_base = (struct txbd8 *) vaddr;
                tx_queue->tx_bd_dma_base = addr;
                tx_queue->dev = ndev;
                /* enet DMA only understands physical addresses */
                addr    += sizeof(struct txbd8) * tx_queue->tx_ring_size;
                vaddr   += sizeof(struct txbd8) * tx_queue->tx_ring_size;
        }

        /* Start the rx descriptor ring where the tx ring leaves off */
        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_queue = priv->rx_queue[i];
                rx_queue->rx_bd_base = (struct rxbd8 *) vaddr;
                rx_queue->rx_bd_dma_base = addr;
                rx_queue->dev = ndev;
                addr    += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
                vaddr   += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
        }

        /* Setup the skbuff rings */
        for (i = 0; i < priv->num_tx_queues; i++) {
                tx_queue = priv->tx_queue[i];
                tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
                                  tx_queue->tx_ring_size, GFP_KERNEL);
                if (!tx_queue->tx_skbuff) {
                        if (netif_msg_ifup(priv))
                                pr_err("%s: Could not allocate tx_skbuff\n",
                                                ndev->name);
                        goto cleanup;
                }

                for (k = 0; k < tx_queue->tx_ring_size; k++)
                        tx_queue->tx_skbuff[k] = NULL;
        }

        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_queue = priv->rx_queue[i];
                rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
                                  rx_queue->rx_ring_size, GFP_KERNEL);

                if (!rx_queue->rx_skbuff) {
                        if (netif_msg_ifup(priv))
                                pr_err("%s: Could not allocate rx_skbuff\n",
                                       ndev->name);
                        goto cleanup;
                }

                for (j = 0; j < rx_queue->rx_ring_size; j++)
                        rx_queue->rx_skbuff[j] = NULL;
        }

        if (gfar_init_bds(ndev))
                goto cleanup;

        return 0;

cleanup:
        free_skb_resources(priv);
        return -ENOMEM;
}

static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
        struct gfar __iomem *regs = priv->gfargrp.regs;
        u32 *baddr;
        int i;

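        /* Successive per-queue base registers (tbase0, tbase1, ...,
         * rbase0, rbase1, ...) are laid out two u32s apart in the
         * register file, hence the "baddr += 2" stride below. */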
        baddr = &regs->tbase0;
        for (i = 0; i < priv->num_tx_queues; i++) {
                gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
                baddr   += 2;
        }

        baddr = &regs->rbase0;
        for (i = 0; i < priv->num_rx_queues; i++) {
                gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
                baddr   += 2;
        }
}

static void gfar_init_mac(struct net_device *ndev)
{
        struct gfar_private *priv = netdev_priv(ndev);
        struct gfar __iomem *regs = priv->gfargrp.regs;
        u32 rctrl = 0;
        u32 tctrl = 0;
        u32 attrs = 0;

        /* write the tx/rx base registers */
        gfar_init_tx_rx_base(priv);

        /* Configure the coalescing support */
        gfar_write(&regs->txic, 0);
        if (priv->tx_queue[0]->txcoalescing)
                gfar_write(&regs->txic, priv->tx_queue[0]->txic);

        gfar_write(&regs->rxic, 0);
        if (priv->rx_queue[0]->rxcoalescing)
                gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);

        if (priv->rx_filer_enable)
                rctrl |= RCTRL_FILREN;

        if (priv->rx_csum_enable)
                rctrl |= RCTRL_CHECKSUMMING;

        if (priv->extended_hash) {
                rctrl |= RCTRL_EXTHASH;

                gfar_clear_exact_match(ndev);
                rctrl |= RCTRL_EMEN;
        }

        if (priv->padding) {
                rctrl &= ~RCTRL_PAL_MASK;
                rctrl |= RCTRL_PADDING(priv->padding);
        }

        /* keep the VLAN-related bits if VLAN is enabled */
        if (priv->vlgrp) {
                rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
                tctrl |= TCTRL_VLINS;
        }

        /* Init rctrl based on our settings */
        gfar_write(&regs->rctrl, rctrl);

        if (ndev->features & NETIF_F_IP_CSUM)
                tctrl |= TCTRL_INIT_CSUM;

        tctrl |= TCTRL_TXSCHED_PRIO;

        gfar_write(&regs->tctrl, tctrl);

        /* Set the extraction length and index */
        attrs = ATTRELI_EL(priv->rx_stash_size) |
                ATTRELI_EI(priv->rx_stash_index);

        gfar_write(&regs->attreli, attrs);

        /* Start with defaults, and add stashing or locking
         * depending on the appropriate variables */
        attrs = ATTR_INIT_SETTINGS;

        if (priv->bd_stash_en)
                attrs |= ATTR_BDSTASH;

        if (priv->rx_stash_size != 0)
                attrs |= ATTR_BUFSTASH;

        gfar_write(&regs->attr, attrs);

        gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
        gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
        gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
}

static const struct net_device_ops gfar_netdev_ops = {
        .ndo_open = gfar_enet_open,
        .ndo_start_xmit = gfar_start_xmit,
        .ndo_stop = gfar_close,
        .ndo_change_mtu = gfar_change_mtu,
        .ndo_set_multicast_list = gfar_set_multi,
        .ndo_tx_timeout = gfar_timeout,
        .ndo_do_ioctl = gfar_ioctl,
        .ndo_select_queue = gfar_select_queue,
        .ndo_vlan_rx_register = gfar_vlan_rx_register,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = gfar_netpoll,
#endif
};

void lock_rx_qs(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_rx_queues; i++)
                spin_lock(&priv->rx_queue[i]->rxlock);
}

void lock_tx_qs(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_tx_queues; i++)
                spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_rx_qs(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_rx_queues; i++)
                spin_unlock(&priv->rx_queue[i]->rxlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_tx_queues; i++)
                spin_unlock(&priv->tx_queue[i]->txlock);
}

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
        return priv->vlgrp || priv->rx_csum_enable;
}
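
/* When the FCB is in use, the controller prepends a frame control block
 * to each received frame, which the RX path must pull off before the
 * data is handed to the stack; TX checksum offload likewise inserts an
 * FCB, which is why gfar_probe() reserves GMAC_FCB_LEN of headroom. */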

u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb)
{
        return skb_get_queue_mapping(skb);
}

static void free_tx_pointers(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_tx_queues; i++)
                kfree(priv->tx_queue[i]);
}

static void free_rx_pointers(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_rx_queues; i++)
                kfree(priv->rx_queue[i]);
}

static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
{
        const char *model;
        const char *ctype;
        const void *mac_addr;
        u64 addr, size;
        int err = 0, i;
        struct net_device *dev = NULL;
        struct gfar_private *priv = NULL;
        struct device_node *np = ofdev->node;
        const u32 *stash;
        const u32 *stash_len;
        const u32 *stash_idx;
        unsigned int num_tx_qs, num_rx_qs;
        u32 *tx_queues, *rx_queues;

        if (!np || !of_device_is_available(np))
                return -ENODEV;

        /* parse the num of tx and rx queues */
        tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
        num_tx_qs = tx_queues ? *tx_queues : 1;

        if (num_tx_qs > MAX_TX_QS) {
                printk(KERN_ERR "num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
                                num_tx_qs, MAX_TX_QS);
                printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
                return -EINVAL;
        }

        rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
        num_rx_qs = rx_queues ? *rx_queues : 1;

        if (num_rx_qs > MAX_RX_QS) {
                printk(KERN_ERR "num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
                                num_rx_qs, MAX_RX_QS);
                printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
                return -EINVAL;
        }

        *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
        dev = *pdev;
        if (NULL == dev)
                return -ENOMEM;

        priv = netdev_priv(dev);
        priv->node = ofdev->node;
        priv->ndev = dev;

        dev->num_tx_queues = num_tx_qs;
        dev->real_num_tx_queues = num_tx_qs;
        priv->num_tx_queues = num_tx_qs;
        priv->num_rx_queues = num_rx_qs;

        /* get a pointer to the register memory */
        addr = of_translate_address(np, of_get_address(np, 0, &size, NULL));
        priv->gfargrp.regs = ioremap(addr, size);

        if (priv->gfargrp.regs == NULL) {
                err = -ENOMEM;
                goto err_out;
        }

        priv->gfargrp.priv = priv; /* back pointer from group to priv */
        priv->gfargrp.rx_bit_map = DEFAULT_MAPPING;
        priv->gfargrp.tx_bit_map = DEFAULT_MAPPING;

        priv->gfargrp.interruptTransmit = irq_of_parse_and_map(np, 0);

        model = of_get_property(np, "model", NULL);

        /* If we aren't the FEC we have multiple interrupts */
        if (model && strcasecmp(model, "FEC")) {
                priv->gfargrp.interruptReceive = irq_of_parse_and_map(np, 1);

                priv->gfargrp.interruptError = irq_of_parse_and_map(np, 2);

                if (priv->gfargrp.interruptTransmit < 0 ||
                                priv->gfargrp.interruptReceive < 0 ||
                                priv->gfargrp.interruptError < 0) {
                        err = -EINVAL;
                        goto err_out;
                }
        }

        for (i = 0; i < priv->num_tx_queues; i++)
                priv->tx_queue[i] = NULL;
        for (i = 0; i < priv->num_rx_queues; i++)
                priv->rx_queue[i] = NULL;

        for (i = 0; i < priv->num_tx_queues; i++) {
                priv->tx_queue[i] = kmalloc(sizeof(struct gfar_priv_tx_q),
                                            GFP_KERNEL);
                if (!priv->tx_queue[i]) {
                        err = -ENOMEM;
                        goto tx_alloc_failed;
                }
                priv->tx_queue[i]->tx_skbuff = NULL;
                priv->tx_queue[i]->qindex = i;
                priv->tx_queue[i]->dev = dev;
                spin_lock_init(&(priv->tx_queue[i]->txlock));
        }

        for (i = 0; i < priv->num_rx_queues; i++) {
                priv->rx_queue[i] = kmalloc(sizeof(struct gfar_priv_rx_q),
                                            GFP_KERNEL);
                if (!priv->rx_queue[i]) {
                        err = -ENOMEM;
                        goto rx_alloc_failed;
                }
                priv->rx_queue[i]->rx_skbuff = NULL;
                priv->rx_queue[i]->qindex = i;
                priv->rx_queue[i]->dev = dev;
                spin_lock_init(&(priv->rx_queue[i]->rxlock));
        }

        stash = of_get_property(np, "bd-stash", NULL);

        if (stash) {
                priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
                priv->bd_stash_en = 1;
        }

        stash_len = of_get_property(np, "rx-stash-len", NULL);

        if (stash_len)
                priv->rx_stash_size = *stash_len;

        stash_idx = of_get_property(np, "rx-stash-idx", NULL);

        if (stash_idx)
                priv->rx_stash_index = *stash_idx;

        if (stash_len || stash_idx)
                priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

        mac_addr = of_get_mac_address(np);
        if (mac_addr)
                memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);

        if (model && !strcasecmp(model, "TSEC"))
                priv->device_flags =
                        FSL_GIANFAR_DEV_HAS_GIGABIT |
                        FSL_GIANFAR_DEV_HAS_COALESCE |
                        FSL_GIANFAR_DEV_HAS_RMON |
                        FSL_GIANFAR_DEV_HAS_MULTI_INTR;
        if (model && !strcasecmp(model, "eTSEC"))
                priv->device_flags =
                        FSL_GIANFAR_DEV_HAS_GIGABIT |
                        FSL_GIANFAR_DEV_HAS_COALESCE |
                        FSL_GIANFAR_DEV_HAS_RMON |
                        FSL_GIANFAR_DEV_HAS_MULTI_INTR |
                        FSL_GIANFAR_DEV_HAS_PADDING |
                        FSL_GIANFAR_DEV_HAS_CSUM |
                        FSL_GIANFAR_DEV_HAS_VLAN |
                        FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
                        FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;

        ctype = of_get_property(np, "phy-connection-type", NULL);

        /* We only care about rgmii-id.  The rest are autodetected */
        if (ctype && !strcmp(ctype, "rgmii-id"))
                priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
        else
                priv->interface = PHY_INTERFACE_MODE_MII;

        if (of_get_property(np, "fsl,magic-packet", NULL))
                priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

        priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

        /* Find the TBI PHY.  If it's not there, we don't support SGMII */
        priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

        return 0;

rx_alloc_failed:
        free_rx_pointers(priv);
tx_alloc_failed:
        free_tx_pointers(priv);
err_out:
        iounmap(priv->gfargrp.regs);
        free_netdev(dev);
        return err;
}

/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct gfar_private *priv = netdev_priv(dev);

        if (!netif_running(dev))
                return -EINVAL;

        if (!priv->phydev)
                return -ENODEV;

        return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
}

static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
        unsigned int new_bit_map = 0x0;
        int mask = 0x1 << (max_qs - 1), i;

        for (i = 0; i < max_qs; i++) {
                if (bit_map & mask)
                        new_bit_map = new_bit_map + (1 << i);
                mask = mask >> 0x1;
        }
        return new_bit_map;
}
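
/* Worked example: with max_qs = 8, a bit_map of 0x80 (MSB = queue 0)
 * becomes 0x01, so queue 0 lands at bit 0, where for_each_bit() and the
 * shifted register masks in gfar_probe() expect to find it. */
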
/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct of_device *ofdev,
                const struct of_device_id *match)
{
        u32 tempval;
        struct net_device *dev = NULL;
        struct gfar_private *priv = NULL;
        struct gfar __iomem *regs = NULL;
        int err = 0, i;
        int len_devname;
        u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;

        err = gfar_of_init(ofdev, &dev);

        if (err)
                return err;

        priv = netdev_priv(dev);
        priv->ndev = dev;
        priv->ofdev = ofdev;
        priv->node = ofdev->node;
        SET_NETDEV_DEV(dev, &ofdev->dev);

        spin_lock_init(&priv->gfargrp.grplock);
        spin_lock_init(&priv->bflock);
        INIT_WORK(&priv->reset_task, gfar_reset_task);

        dev_set_drvdata(&ofdev->dev, priv);
        regs = priv->gfargrp.regs;

        /* Stop the DMA engine now, in case it was running before
         * (the firmware could have used it, and left it running). */
        gfar_halt(dev);

        /* Reset MAC layer */
        gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

        /* We need to delay at least 3 TX clocks */
        udelay(2);

        tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
        gfar_write(&regs->maccfg1, tempval);

        /* Initialize MACCFG2. */
        gfar_write(&regs->maccfg2, MACCFG2_INIT_SETTINGS);

        /* Initialize ECNTRL */
        gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

        /* Set the dev->base_addr to the gfar reg region */
        dev->base_addr = (unsigned long) regs;

        SET_NETDEV_DEV(dev, &ofdev->dev);

        /* Fill in the dev structure */
        dev->watchdog_timeo = TX_TIMEOUT;
        dev->mtu = 1500;
        dev->netdev_ops = &gfar_netdev_ops;
        dev->ethtool_ops = &gfar_ethtool_ops;

        /* Register NAPI; one NAPI instance is registered per group */
        netif_napi_add(dev, &priv->gfargrp.napi, gfar_poll, GFAR_DEV_WEIGHT);

        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
                priv->rx_csum_enable = 1;
                dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
        } else
                priv->rx_csum_enable = 0;

        priv->vlgrp = NULL;

        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN)
                dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
                priv->extended_hash = 1;
                priv->hash_width = 9;

                priv->hash_regs[0] = &regs->igaddr0;
                priv->hash_regs[1] = &regs->igaddr1;
                priv->hash_regs[2] = &regs->igaddr2;
                priv->hash_regs[3] = &regs->igaddr3;
                priv->hash_regs[4] = &regs->igaddr4;
                priv->hash_regs[5] = &regs->igaddr5;
                priv->hash_regs[6] = &regs->igaddr6;
                priv->hash_regs[7] = &regs->igaddr7;
                priv->hash_regs[8] = &regs->gaddr0;
                priv->hash_regs[9] = &regs->gaddr1;
                priv->hash_regs[10] = &regs->gaddr2;
                priv->hash_regs[11] = &regs->gaddr3;
                priv->hash_regs[12] = &regs->gaddr4;
                priv->hash_regs[13] = &regs->gaddr5;
                priv->hash_regs[14] = &regs->gaddr6;
                priv->hash_regs[15] = &regs->gaddr7;
        } else {
                priv->extended_hash = 0;
                priv->hash_width = 8;

                priv->hash_regs[0] = &regs->gaddr0;
                priv->hash_regs[1] = &regs->gaddr1;
                priv->hash_regs[2] = &regs->gaddr2;
                priv->hash_regs[3] = &regs->gaddr3;
                priv->hash_regs[4] = &regs->gaddr4;
                priv->hash_regs[5] = &regs->gaddr5;
                priv->hash_regs[6] = &regs->gaddr6;
                priv->hash_regs[7] = &regs->gaddr7;
        }

        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
                priv->padding = DEFAULT_PADDING;
        else
                priv->padding = 0;

        if (dev->features & NETIF_F_IP_CSUM)
                dev->hard_header_len += GMAC_FCB_LEN;

        /* The bit maps are reversed here because bit_map's MSB is queue 0,
         * while for_each_bit() walks bits from LSB to MSB, which would
         * otherwise reverse the queue numbering */
        priv->gfargrp.tx_bit_map = reverse_bitmap(priv->gfargrp.tx_bit_map, MAX_TX_QS);
        priv->gfargrp.rx_bit_map = reverse_bitmap(priv->gfargrp.rx_bit_map, MAX_RX_QS);

        /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values */
        for_each_bit(i, &priv->gfargrp.rx_bit_map, priv->num_rx_queues) {
                priv->gfargrp.num_rx_queues++;
                rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
                rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
        }
        for_each_bit(i, &priv->gfargrp.tx_bit_map, priv->num_tx_queues) {
                priv->gfargrp.num_tx_queues++;
                tstat = tstat | (TSTAT_CLEAR_THALT >> i);
                tqueue = tqueue | (TQUEUE_EN0 >> i);
        }
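
        /* For example, with rx queues 0 and 1 active the rx loop above
         * leaves rstat = RSTAT_CLEAR_RHALT | (RSTAT_CLEAR_RHALT >> 1),
         * i.e. the halt-clear bits for both queues. */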
        priv->gfargrp.rstat = rstat;
        priv->gfargrp.tstat = tstat;

        gfar_write(&regs->rqueue, rqueue);
        gfar_write(&regs->tqueue, tqueue);

        priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

        /* Initializing some of the rx/tx queue level parameters */
        for (i = 0; i < priv->num_tx_queues; i++) {
                priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
                priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
                priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
                priv->tx_queue[i]->txic = DEFAULT_TXIC;
        }

        for (i = 0; i < priv->num_rx_queues; i++) {
                priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
                priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
                priv->rx_queue[i]->rxic = DEFAULT_RXIC;
        }

        /* Enable most messages by default */
        priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;

        /* Carrier starts down, phylib will bring it up */
        netif_carrier_off(dev);

        err = register_netdev(dev);

        if (err) {
                printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
                                dev->name);
                goto register_fail;
        }

        device_init_wakeup(&dev->dev,
                priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

        /* fill out IRQ number and name fields */
        len_devname = strlen(dev->name);
        strncpy(&priv->gfargrp.int_name_tx[0], dev->name, len_devname);
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
                strncpy(&priv->gfargrp.int_name_tx[len_devname],
                        "_tx", sizeof("_tx") + 1);

                strncpy(&priv->gfargrp.int_name_rx[0], dev->name, len_devname);
                strncpy(&priv->gfargrp.int_name_rx[len_devname],
                        "_rx", sizeof("_rx") + 1);

                strncpy(&priv->gfargrp.int_name_er[0], dev->name, len_devname);
                strncpy(&priv->gfargrp.int_name_er[len_devname],
                        "_er", sizeof("_er") + 1);
        } else
                priv->gfargrp.int_name_tx[len_devname] = '\0';

        /* Create all the sysfs files */
        gfar_init_sysfs(dev);

        /* Print out the device info */
        printk(KERN_INFO DEVICE_NAME "%pM\n", dev->name, dev->dev_addr);

        /* Even more device info helps when determining which kernel
         * provided which set of benchmarks. */
        printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
        for (i = 0; i < priv->num_rx_queues; i++)
                printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n",
                        dev->name, i, priv->rx_queue[i]->rx_ring_size);
        for (i = 0; i < priv->num_tx_queues; i++)
                printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n",
                        dev->name, i, priv->tx_queue[i]->tx_ring_size);

        return 0;

register_fail:
        iounmap(priv->gfargrp.regs);
        free_tx_pointers(priv);
        free_rx_pointers(priv);
        if (priv->phy_node)
                of_node_put(priv->phy_node);
        if (priv->tbi_node)
                of_node_put(priv->tbi_node);
        free_netdev(dev);
        return err;
}

static int gfar_remove(struct of_device *ofdev)
{
        struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);

        if (priv->phy_node)
                of_node_put(priv->phy_node);
        if (priv->tbi_node)
                of_node_put(priv->tbi_node);

        dev_set_drvdata(&ofdev->dev, NULL);

        unregister_netdev(priv->ndev);
        iounmap(priv->gfargrp.regs);
        free_netdev(priv->ndev);

        return 0;
}

#ifdef CONFIG_PM

static int gfar_suspend(struct device *dev)
{
        struct gfar_private *priv = dev_get_drvdata(dev);
        struct net_device *ndev = priv->ndev;
        struct gfar __iomem *regs = NULL;
        unsigned long flags;
        u32 tempval;

        int magic_packet = priv->wol_en &&
                (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

        netif_device_detach(ndev);
        regs = priv->gfargrp.regs;

        if (netif_running(ndev)) {
                local_irq_save(flags);
                lock_tx_qs(priv);
                lock_rx_qs(priv);

                gfar_halt_nodisable(ndev);

                /* Disable Tx, and Rx if wake-on-LAN is disabled. */
                tempval = gfar_read(&regs->maccfg1);

                tempval &= ~MACCFG1_TX_EN;

                if (!magic_packet)
                        tempval &= ~MACCFG1_RX_EN;

                gfar_write(&regs->maccfg1, tempval);

                unlock_rx_qs(priv);
                unlock_tx_qs(priv);
                local_irq_restore(flags);

                napi_disable(&priv->gfargrp.napi);

                if (magic_packet) {
                        /* Enable interrupt on Magic Packet */
                        gfar_write(&regs->imask, IMASK_MAG);

                        /* Enable Magic Packet mode */
                        tempval = gfar_read(&regs->maccfg2);
                        tempval |= MACCFG2_MPEN;
                        gfar_write(&regs->maccfg2, tempval);
                } else {
                        phy_stop(priv->phydev);
                }
        }

        return 0;
}

static int gfar_resume(struct device *dev)
{
        struct gfar_private *priv = dev_get_drvdata(dev);
        struct net_device *ndev = priv->ndev;
        struct gfar __iomem *regs = NULL;
        unsigned long flags;
        u32 tempval;
        int magic_packet = priv->wol_en &&
                (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

        if (!netif_running(ndev)) {
                netif_device_attach(ndev);
                return 0;
        }

        if (!magic_packet && priv->phydev)
                phy_start(priv->phydev);

        /* Disable Magic Packet mode, in case something
         * else woke us up.
         */
        regs = priv->gfargrp.regs;

        local_irq_save(flags);
        lock_tx_qs(priv);
        lock_rx_qs(priv);

        tempval = gfar_read(&regs->maccfg2);
        tempval &= ~MACCFG2_MPEN;
        gfar_write(&regs->maccfg2, tempval);

        gfar_start(ndev);

        unlock_rx_qs(priv);
        unlock_tx_qs(priv);
        local_irq_restore(flags);

        netif_device_attach(ndev);

        napi_enable(&priv->gfargrp.napi);

        return 0;
}

static int gfar_restore(struct device *dev)
{
        struct gfar_private *priv = dev_get_drvdata(dev);
        struct net_device *ndev = priv->ndev;

        if (!netif_running(ndev))
                return 0;

        gfar_init_bds(ndev);
        init_registers(ndev);
        gfar_set_mac_address(ndev);
        gfar_init_mac(ndev);
        gfar_start(ndev);

        priv->oldlink = 0;
        priv->oldspeed = 0;
        priv->oldduplex = -1;

        if (priv->phydev)
                phy_start(priv->phydev);

        netif_device_attach(ndev);
        napi_enable(&priv->gfargrp.napi);

        return 0;
}

static struct dev_pm_ops gfar_pm_ops = {
        .suspend = gfar_suspend,
        .resume = gfar_resume,
        .freeze = gfar_suspend,
        .thaw = gfar_resume,
        .restore = gfar_restore,
};

#define GFAR_PM_OPS (&gfar_pm_ops)

static int gfar_legacy_suspend(struct of_device *ofdev, pm_message_t state)
{
        return gfar_suspend(&ofdev->dev);
}

static int gfar_legacy_resume(struct of_device *ofdev)
{
        return gfar_resume(&ofdev->dev);
}

#else

#define GFAR_PM_OPS NULL
#define gfar_legacy_suspend NULL
#define gfar_legacy_resume NULL

#endif

/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = NULL;
        u32 ecntrl;

        regs = priv->gfargrp.regs;
        ecntrl = gfar_read(&regs->ecntrl);

        if (ecntrl & ECNTRL_SGMII_MODE)
                return PHY_INTERFACE_MODE_SGMII;

        if (ecntrl & ECNTRL_TBI_MODE) {
                if (ecntrl & ECNTRL_REDUCED_MODE)
                        return PHY_INTERFACE_MODE_RTBI;
                else
                        return PHY_INTERFACE_MODE_TBI;
        }

        if (ecntrl & ECNTRL_REDUCED_MODE) {
                if (ecntrl & ECNTRL_REDUCED_MII_MODE)
                        return PHY_INTERFACE_MODE_RMII;
                else {
                        phy_interface_t interface = priv->interface;

                        /*
                         * This isn't autodetected right now, so it must
                         * be set by the device tree or platform code.
                         */
                        if (interface == PHY_INTERFACE_MODE_RGMII_ID)
                                return PHY_INTERFACE_MODE_RGMII_ID;

                        return PHY_INTERFACE_MODE_RGMII;
                }
        }

        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
                return PHY_INTERFACE_MODE_GMII;

        return PHY_INTERFACE_MODE_MII;
}

/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        uint gigabit_support =
                priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
                SUPPORTED_1000baseT_Full : 0;
        phy_interface_t interface;

        priv->oldlink = 0;
        priv->oldspeed = 0;
        priv->oldduplex = -1;

        interface = gfar_get_interface(dev);

        priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
                                      interface);
        if (!priv->phydev)
                priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
                                                         interface);
        if (!priv->phydev) {
                dev_err(&dev->dev, "could not attach to PHY\n");
                return -ENODEV;
        }

        if (interface == PHY_INTERFACE_MODE_SGMII)
                gfar_configure_serdes(dev);

        /* Remove any features not supported by the controller */
        priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
        priv->phydev->advertising = priv->phydev->supported;

        return 0;
}

/*
 * Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip.  We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register.  We assume
 * that the TBIPA register is valid.  Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct phy_device *tbiphy;

        if (!priv->tbi_node) {
                dev_warn(&dev->dev, "error: SGMII mode requires that the "
                                    "device tree specify a tbi-handle\n");
                return;
        }

        tbiphy = of_phy_find_device(priv->tbi_node);
        if (!tbiphy) {
                dev_err(&dev->dev, "error: Could not get TBI device\n");
                return;
        }

        /*
         * If the link is already up, we must already be ok, and don't need to
         * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
         * everything for us?  Resetting it takes the link down and requires
         * several seconds for it to come back.
         */
        if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
                return;

        /* Single clk mode, mii mode off (for serdes communication) */
        phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);

        phy_write(tbiphy, MII_ADVERTISE,
                        ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
                        ADVERTISE_1000XPSE_ASYM);

        phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
                        BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
}

static void init_registers(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = NULL;

        regs = priv->gfargrp.regs;

        /* Clear IEVENT */
        gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

        /* Initialize IMASK */
        gfar_write(&regs->imask, IMASK_INIT_CLEAR);

        /* Init hash registers to zero */
        gfar_write(&regs->igaddr0, 0);
        gfar_write(&regs->igaddr1, 0);
        gfar_write(&regs->igaddr2, 0);
        gfar_write(&regs->igaddr3, 0);
        gfar_write(&regs->igaddr4, 0);
        gfar_write(&regs->igaddr5, 0);
        gfar_write(&regs->igaddr6, 0);
        gfar_write(&regs->igaddr7, 0);

        gfar_write(&regs->gaddr0, 0);
        gfar_write(&regs->gaddr1, 0);
        gfar_write(&regs->gaddr2, 0);
        gfar_write(&regs->gaddr3, 0);
        gfar_write(&regs->gaddr4, 0);
        gfar_write(&regs->gaddr5, 0);
        gfar_write(&regs->gaddr6, 0);
        gfar_write(&regs->gaddr7, 0);

        /* Zero out the rmon mib registers if it has them */
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
                memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));

                /* Mask off the CAM interrupts */
                gfar_write(&regs->rmon.cam1, 0xffffffff);
                gfar_write(&regs->rmon.cam2, 0xffffffff);
        }

        /* Initialize the max receive buffer length */
        gfar_write(&regs->mrblr, priv->rx_buffer_size);

        /* Initialize the Minimum Frame Length Register */
        gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
}

/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->gfargrp.regs;
        u32 tempval;

        /* Mask all interrupts */
        gfar_write(&regs->imask, IMASK_INIT_CLEAR);

        /* Clear all interrupts */
        gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

        /* Stop the DMA, and wait for it to stop */
        tempval = gfar_read(&regs->dmactrl);
        if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
            != (DMACTRL_GRS | DMACTRL_GTS)) {
                tempval |= (DMACTRL_GRS | DMACTRL_GTS);
                gfar_write(&regs->dmactrl, tempval);

                /* Graceful stop: busy-wait until the controller signals
                 * that receive and transmit have stopped cleanly */
                while (!(gfar_read(&regs->ievent) &
                         (IEVENT_GRSC | IEVENT_GTSC)))
                        cpu_relax();
        }
}

/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->gfargrp.regs;
        u32 tempval;

        gfar_halt_nodisable(dev);

        /* Disable Rx and Tx */
        tempval = gfar_read(&regs->maccfg1);
        tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
        gfar_write(&regs->maccfg1, tempval);
}

void stop_gfar(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        unsigned long flags;

        phy_stop(priv->phydev);

        /* Lock it down */
        local_irq_save(flags);
        lock_tx_qs(priv);
        lock_rx_qs(priv);

        gfar_halt(dev);

        unlock_rx_qs(priv);
        unlock_tx_qs(priv);
        local_irq_restore(flags);

        /* Free the IRQs */
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
                free_irq(priv->gfargrp.interruptError, &priv->gfargrp);
                free_irq(priv->gfargrp.interruptTransmit, &priv->gfargrp);
                free_irq(priv->gfargrp.interruptReceive, &priv->gfargrp);
        } else {
                free_irq(priv->gfargrp.interruptTransmit, &priv->gfargrp);
        }

        free_skb_resources(priv);
}

static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
{
        struct txbd8 *txbdp;
        struct gfar_private *priv = netdev_priv(tx_queue->dev);
        int i, j;

        txbdp = tx_queue->tx_bd_base;

        for (i = 0; i < tx_queue->tx_ring_size; i++) {
                if (!tx_queue->tx_skbuff[i])
                        continue;

                dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
                                txbdp->length, DMA_TO_DEVICE);
                txbdp->lstatus = 0;
                for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
                                j++) {
                        txbdp++;
                        dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
                                        txbdp->length, DMA_TO_DEVICE);
                }
                txbdp++;
                dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
                tx_queue->tx_skbuff[i] = NULL;
        }
        kfree(tx_queue->tx_skbuff);
}

static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
{
        struct rxbd8 *rxbdp;
        struct gfar_private *priv = netdev_priv(rx_queue->dev);
        int i;

        rxbdp = rx_queue->rx_bd_base;

        for (i = 0; i < rx_queue->rx_ring_size; i++) {
                if (rx_queue->rx_skbuff[i]) {
                        dma_unmap_single(&priv->ofdev->dev,
                                        rxbdp->bufPtr, priv->rx_buffer_size,
                                        DMA_FROM_DEVICE);
                        dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
                        rx_queue->rx_skbuff[i] = NULL;
                }
                rxbdp->lstatus = 0;
                rxbdp->bufPtr = 0;
                rxbdp++;
        }
        kfree(rx_queue->rx_skbuff);
}

/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff */
static void free_skb_resources(struct gfar_private *priv)
{
        struct gfar_priv_tx_q *tx_queue = NULL;
        struct gfar_priv_rx_q *rx_queue = NULL;
        int i;

        /* Go through all the buffer descriptors and free their data buffers,
         * but only if the skbuff array was actually allocated */
        for (i = 0; i < priv->num_tx_queues; i++) {
                tx_queue = priv->tx_queue[i];
                if (tx_queue->tx_skbuff)
                        free_skb_tx_queue(tx_queue);
        }

        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_queue = priv->rx_queue[i];
                if (rx_queue->rx_skbuff)
                        free_skb_rx_queue(rx_queue);
        }

        dma_free_coherent(&priv->ofdev->dev,
                        sizeof(struct txbd8) * priv->total_tx_ring_size +
                        sizeof(struct rxbd8) * priv->total_rx_ring_size,
                        priv->tx_queue[0]->tx_bd_base,
                        priv->tx_queue[0]->tx_bd_dma_base);
}

void gfar_start(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->gfargrp.regs;
        u32 tempval;

        /* Enable Rx and Tx in MACCFG1 */
        tempval = gfar_read(&regs->maccfg1);
        tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
        gfar_write(&regs->maccfg1, tempval);

        /* Initialize DMACTRL to have WWR and WOP */
        tempval = gfar_read(&regs->dmactrl);
        tempval |= DMACTRL_INIT_SETTINGS;
        gfar_write(&regs->dmactrl, tempval);

        /* Make sure we aren't stopped */
        tempval = gfar_read(&regs->dmactrl);
        tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
        gfar_write(&regs->dmactrl, tempval);

        /* Clear THLT/RHLT, so that the DMA starts polling now */
        gfar_write(&regs->tstat, priv->gfargrp.tstat);
        gfar_write(&regs->rstat, priv->gfargrp.rstat);

        /* Unmask the interrupts we look for */
        gfar_write(&regs->imask, IMASK_DEFAULT);

        dev->trans_start = jiffies;
}

/* Bring the controller up and running */
int startup_gfar(struct net_device *ndev)
{
        struct gfar_private *priv = netdev_priv(ndev);
        struct gfar __iomem *regs = priv->gfargrp.regs;
        int err;

        gfar_write(&regs->imask, IMASK_INIT_CLEAR);

        err = gfar_alloc_skb_resources(ndev);
        if (err)
                return err;

        gfar_init_mac(ndev);

        /* If the device has multiple interrupts, register for
         * them.  Otherwise, only register for the one */
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
                /* Install our interrupt handlers for Error,
                 * Transmit, and Receive */
                err = request_irq(priv->gfargrp.interruptError, gfar_error, 0,
                                  priv->gfargrp.int_name_er, &priv->gfargrp);
                if (err) {
                        if (netif_msg_intr(priv))
                                pr_err("%s: Can't get IRQ %d\n", ndev->name,
                                       priv->gfargrp.interruptError);
                        goto err_irq_fail;
                }

                err = request_irq(priv->gfargrp.interruptTransmit,
                                        gfar_transmit, 0,
                                        priv->gfargrp.int_name_tx,
                                        &priv->gfargrp);
                if (err) {
                        if (netif_msg_intr(priv))
                                pr_err("%s: Can't get IRQ %d\n", ndev->name,
                                       priv->gfargrp.interruptTransmit);
                        goto tx_irq_fail;
                }

                err = request_irq(priv->gfargrp.interruptReceive,
                                        gfar_receive, 0,
                                        priv->gfargrp.int_name_rx,
                                        &priv->gfargrp);
                if (err) {
                        if (netif_msg_intr(priv))
                                pr_err("%s: Can't get IRQ %d (receive0)\n",
                                        ndev->name,
                                        priv->gfargrp.interruptReceive);
                        goto rx_irq_fail;
                }
        } else {
                err = request_irq(priv->gfargrp.interruptTransmit,
                                        gfar_interrupt, 0,
                                        priv->gfargrp.int_name_tx,
                                        &priv->gfargrp);
                if (err) {
                        if (netif_msg_intr(priv))
                                pr_err("%s: Can't get IRQ %d\n", ndev->name,
                                       priv->gfargrp.interruptTransmit);
                        goto err_irq_fail;
                }
        }

        /* Start the controller */
        gfar_start(ndev);

        phy_start(priv->phydev);

        return 0;

rx_irq_fail:
        free_irq(priv->gfargrp.interruptTransmit, &priv->gfargrp);
tx_irq_fail:
        free_irq(priv->gfargrp.interruptError, &priv->gfargrp);
err_irq_fail:
        free_skb_resources(priv);
        return err;
}

/* Called when something needs to use the ethernet device.
 * Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        int err;

        napi_enable(&priv->gfargrp.napi);

        skb_queue_head_init(&priv->rx_recycle);

        /* Initialize a bunch of registers */
        init_registers(dev);

        gfar_set_mac_address(dev);

        err = init_phy(dev);

        if (err) {
                napi_disable(&priv->gfargrp.napi);
                return err;
        }

        err = startup_gfar(dev);
        if (err) {
                napi_disable(&priv->gfargrp.napi);
                return err;
        }

        netif_tx_start_all_queues(dev);

        device_set_wakeup_enable(&dev->dev, priv->wol_en);

        return err;
}

static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
1580 {
1581         struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
1582
1583         memset(fcb, 0, GMAC_FCB_LEN);
1584
1585         return fcb;
1586 }
1587
1588 static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
1589 {
1590         u8 flags = 0;
1591
1592         /* If we're here, it's an IP packet with a TCP or UDP
1593          * payload.  We set it to checksum, using a pseudo-header
1594          * we provide
1595          */
1596         flags = TXFCB_DEFAULT;
1597
1598         /* Tell the controller what the protocol is */
1599         /* And provide the already calculated phcs */
1600         if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
1601                 flags |= TXFCB_UDP;
1602                 fcb->phcs = udp_hdr(skb)->check;
1603         } else
1604                 fcb->phcs = tcp_hdr(skb)->check;
1605
1606         /* l3os is the distance between the start of the
1607          * frame (skb->data) and the start of the IP hdr.
1608          * l4os is the distance between the start of the
1609          * l3 hdr and the l4 hdr */
1610         fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
1611         fcb->l4os = skb_network_header_len(skb);
1612
1613         fcb->flags = flags;
1614 }
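
/*
 * Worked example (a sketch, assuming an untagged IPv4/TCP frame with no
 * IP options): gfar_add_fcb() has already pushed GMAC_FCB_LEN bytes, so
 * skb->data points at the FCB and the network header now sits at offset
 * GMAC_FCB_LEN + ETH_HLEN.  The fields then come out as:
 *
 *   l3os = skb_network_offset(skb) - GMAC_FCB_LEN = ETH_HLEN = 14
 *   l4os = skb_network_header_len(skb) = 20 (IPv4 header, no options)
 */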
1615
1616 static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
1617 {
1618         fcb->flags |= TXFCB_VLN;
1619         fcb->vlctl = vlan_tx_tag_get(skb);
1620 }
1621
1622 static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
1623                                struct txbd8 *base, int ring_size)
1624 {
1625         struct txbd8 *new_bd = bdp + stride;
1626
1627         return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
1628 }
1629
1630 static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
1631                 int ring_size)
1632 {
1633         return skip_txbd(bdp, 1, base, ring_size);
1634 }
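
/*
 * A minimal sketch of the wrap arithmetic above: with ring_size = 8 and
 * bdp at base + 6, skip_txbd(bdp, 3, base, 8) computes base + 9, sees it
 * is past the end, and returns base + 1, i.e. the ring is walked modulo
 * ring_size without a divide.
 */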
1635
1636 /* This is called by the kernel when a frame is ready for transmission. */
1637 /* It is pointed to by the ndo_start_xmit field in the netdev_ops structure */
1638 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1639 {
1640         struct gfar_private *priv = netdev_priv(dev);
1641         struct gfar_priv_tx_q *tx_queue = NULL;
1642         struct netdev_queue *txq;
1643         struct gfar __iomem *regs = NULL;
1644         struct txfcb *fcb = NULL;
1645         struct txbd8 *txbdp, *txbdp_start, *base;
1646         u32 lstatus;
1647         int i, rq = 0;
1648         u32 bufaddr;
1649         unsigned long flags;
1650         unsigned int nr_frags, length;
1651
1653         rq = skb->queue_mapping;
1654         tx_queue = priv->tx_queue[rq];
1655         txq = netdev_get_tx_queue(dev, rq);
1656         base = tx_queue->tx_bd_base;
1657         regs = priv->gfargrp.regs;
1658
1659         /* make space for additional header when fcb is needed */
1660         if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
1661                         (priv->vlgrp && vlan_tx_tag_present(skb))) &&
1662                         (skb_headroom(skb) < GMAC_FCB_LEN)) {
1663                 struct sk_buff *skb_new;
1664
1665                 skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN);
1666                 if (!skb_new) {
1667                         dev->stats.tx_errors++;
1668                         kfree_skb(skb);
1669                         return NETDEV_TX_OK;
1670                 }
1671                 kfree_skb(skb);
1672                 skb = skb_new;
1673         }
1674
1675         /* total number of fragments in the SKB */
1676         nr_frags = skb_shinfo(skb)->nr_frags;
1677
1678         spin_lock_irqsave(&tx_queue->txlock, flags);
1679
1680         /* check if there is space to queue this packet */
1681         if ((nr_frags + 1) > tx_queue->num_txbdfree) {
1682                 /* no space, stop the queue */
1683                 netif_tx_stop_queue(txq);
1684                 dev->stats.tx_fifo_errors++;
1685                 spin_unlock_irqrestore(&tx_queue->txlock, flags);
1686                 return NETDEV_TX_BUSY;
1687         }
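
        /* A frame occupies nr_frags + 1 descriptors: one for the linear
         * part of the skb plus one per page fragment.  num_txbdfree is
         * debited by the same amount below, once the BDs are filled in. */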
1688
1689         /* Update transmit stats */
1690         dev->stats.tx_bytes += skb->len;
1691
1692         txbdp = txbdp_start = tx_queue->cur_tx;
1693
1694         if (nr_frags == 0) {
1695                 lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1696         } else {
1697                 /* Place the fragment addresses and lengths into the TxBDs */
1698                 for (i = 0; i < nr_frags; i++) {
1699                         /* Point at the next BD, wrapping as needed */
1700                         txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1701
1702                         length = skb_shinfo(skb)->frags[i].size;
1703
1704                         lstatus = txbdp->lstatus | length |
1705                                 BD_LFLAG(TXBD_READY);
1706
1707                         /* Handle the last BD specially */
1708                         if (i == nr_frags - 1)
1709                                 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1710
1711                         bufaddr = dma_map_page(&priv->ofdev->dev,
1712                                         skb_shinfo(skb)->frags[i].page,
1713                                         skb_shinfo(skb)->frags[i].page_offset,
1714                                         length,
1715                                         DMA_TO_DEVICE);
1716
1717                         /* set the TxBD length and buffer pointer */
1718                         txbdp->bufPtr = bufaddr;
1719                         txbdp->lstatus = lstatus;
1720                 }
1721
1722                 lstatus = txbdp_start->lstatus;
1723         }
1724
1725         /* Set up checksumming */
1726         if (CHECKSUM_PARTIAL == skb->ip_summed) {
1727                 fcb = gfar_add_fcb(skb);
1728                 lstatus |= BD_LFLAG(TXBD_TOE);
1729                 gfar_tx_checksum(skb, fcb);
1730         }
1731
1732         if (priv->vlgrp && vlan_tx_tag_present(skb)) {
1733                 if (unlikely(NULL == fcb)) {
1734                         fcb = gfar_add_fcb(skb);
1735                         lstatus |= BD_LFLAG(TXBD_TOE);
1736                 }
1737
1738                 gfar_tx_vlan(skb, fcb);
1739         }
1740
1741         /* setup the TxBD length and buffer pointer for the first BD */
1742         tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
1743         txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
1744                         skb_headlen(skb), DMA_TO_DEVICE);
1745
1746         lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
1747
1748         /*
1749          * The powerpc-specific eieio() is used, as wmb() has too strong
1750          * semantics (it requires synchronization between cacheable and
1751          * uncacheable mappings, which eieio doesn't provide and which we
1752          * don't need), thus requiring a more expensive sync instruction.  At
1753          * some point, the set of architecture-independent barrier functions
1754          * should be expanded to include weaker barriers.
1755          */
1756         eieio();
1757
1758         txbdp_start->lstatus = lstatus;
1759
1760         /* Update the current skb pointer to the next entry we will use
1761          * (wrapping if necessary) */
1762         tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
1763                 TX_RING_MOD_MASK(tx_queue->tx_ring_size);
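
        /* A sketch of the mask arithmetic, assuming TX_RING_MOD_MASK(n)
         * expands to (n - 1) for the power-of-two ring sizes this driver
         * uses: with tx_ring_size = 256 the mask is 0xff, so index 255
         * advances to (255 + 1) & 0xff = 0 and wraps without a modulo. */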
1764
1765         tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1766
1767         /* reduce TxBD free count */
1768         tx_queue->num_txbdfree -= (nr_frags + 1);
1769
1770         dev->trans_start = jiffies;
1771
1772         /* If the next BD still needs to be cleaned up, then the bds
1773            are full.  We need to tell the kernel to stop sending us stuff. */
1774         if (!tx_queue->num_txbdfree) {
1775                 netif_tx_stop_queue(txq);
1776
1777                 dev->stats.tx_fifo_errors++;
1778         }
1779
1780         /* Tell the DMA to go go go */
1781         gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
1782
1783         /* Unlock priv */
1784         spin_unlock_irqrestore(&tx_queue->txlock, flags);
1785
1786         return NETDEV_TX_OK;
1787 }
1788
1789 /* Stops the kernel queue, and halts the controller */
1790 static int gfar_close(struct net_device *dev)
1791 {
1792         struct gfar_private *priv = netdev_priv(dev);
1793
1794         napi_disable(&priv->gfargrp.napi);
1795
1796         skb_queue_purge(&priv->rx_recycle);
1797         cancel_work_sync(&priv->reset_task);
1798         stop_gfar(dev);
1799
1800         /* Disconnect from the PHY */
1801         phy_disconnect(priv->phydev);
1802         priv->phydev = NULL;
1803
1804         netif_tx_stop_all_queues(dev);
1805
1806         return 0;
1807 }
1808
1809 /* Changes the mac address if the controller is not running. */
1810 static int gfar_set_mac_address(struct net_device *dev)
1811 {
1812         gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
1813
1814         return 0;
1815 }
1816
1818 /* Enables and disables VLAN insertion/extraction */
1819 static void gfar_vlan_rx_register(struct net_device *dev,
1820                 struct vlan_group *grp)
1821 {
1822         struct gfar_private *priv = netdev_priv(dev);
1823         struct gfar __iomem *regs = NULL;
1824         unsigned long flags;
1825         u32 tempval;
1826
1827         regs = priv->gfargrp.regs;
1828         local_irq_save(flags);
1829         lock_rx_qs(priv);
1830
1831         priv->vlgrp = grp;
1832
1833         if (grp) {
1834                 /* Enable VLAN tag insertion */
1835                 tempval = gfar_read(&regs->tctrl);
1836                 tempval |= TCTRL_VLINS;
1837
1838                 gfar_write(&regs->tctrl, tempval);
1839
1840                 /* Enable VLAN tag extraction */
1841                 tempval = gfar_read(&regs->rctrl);
1842                 tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
1843                 gfar_write(&regs->rctrl, tempval);
1844         } else {
1845                 /* Disable VLAN tag insertion */
1846                 tempval = gfar_read(&regs->tctrl);
1847                 tempval &= ~TCTRL_VLINS;
1848                 gfar_write(&regs->tctrl, tempval);
1849
1850                 /* Disable VLAN tag extraction */
1851                 tempval = gfar_read(&regs->rctrl);
1852                 tempval &= ~RCTRL_VLEX;
1853                 /* If parse is no longer required, then disable parser */
1854                 if (tempval & RCTRL_REQ_PARSER)
1855                         tempval |= RCTRL_PRSDEP_INIT;
1856                 else
1857                         tempval &= ~RCTRL_PRSDEP_INIT;
1858                 gfar_write(&regs->rctrl, tempval);
1859         }
1860
1861         gfar_change_mtu(dev, dev->mtu);
1862
1863         unlock_rx_qs(priv);
1864         local_irq_restore(flags);
1865 }
1866
1867 static int gfar_change_mtu(struct net_device *dev, int new_mtu)
1868 {
1869         int tempsize, tempval;
1870         struct gfar_private *priv = netdev_priv(dev);
1871         struct gfar __iomem *regs = priv->gfargrp.regs;
1872         int oldsize = priv->rx_buffer_size;
1873         int frame_size = new_mtu + ETH_HLEN;
1874
1875         if (priv->vlgrp)
1876                 frame_size += VLAN_HLEN;
1877
1878         if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
1879                 if (netif_msg_drv(priv))
1880                         printk(KERN_ERR "%s: Invalid MTU setting\n",
1881                                         dev->name);
1882                 return -EINVAL;
1883         }
1884
1885         if (gfar_uses_fcb(priv))
1886                 frame_size += GMAC_FCB_LEN;
1887
1888         frame_size += priv->padding;
1889
1890         tempsize =
1891             (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
1892             INCREMENTAL_BUFFER_SIZE;
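
        /* Worked example of the round-up above, assuming the 512-byte
         * INCREMENTAL_BUFFER_SIZE from gianfar.h: frame_size = 1522 gives
         * (1522 & ~511) + 512 = 1024 + 512 = 1536.  An exact multiple is
         * still bumped a full step: 1024 becomes 1536 as well. */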
1893
1894         /* Only stop and start the controller if it isn't already
1895          * stopped, and we changed something */
1896         if ((oldsize != tempsize) && (dev->flags & IFF_UP))
1897                 stop_gfar(dev);
1898
1899         priv->rx_buffer_size = tempsize;
1900
1901         dev->mtu = new_mtu;
1902
1903         gfar_write(&regs->mrblr, priv->rx_buffer_size);
1904         gfar_write(&regs->maxfrm, priv->rx_buffer_size);
1905
1906         /* If the mtu is larger than the max size for standard
1907          * ethernet frames (ie, a jumbo frame), then set maccfg2
1908          * to allow huge frames, and to check the length */
1909         tempval = gfar_read(&regs->maccfg2);
1910
1911         if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
1912                 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
1913         else
1914                 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
1915
1916         gfar_write(&regs->maccfg2, tempval);
1917
1918         if ((oldsize != tempsize) && (dev->flags & IFF_UP))
1919                 startup_gfar(dev);
1920
1921         return 0;
1922 }
1923
1924 /* gfar_reset_task gets scheduled when a packet has not been
1925  * transmitted after a set amount of time.
1926  * For now, assume that clearing out all the structures, and
1927  * starting over will fix the problem.
1928  */
1929 static void gfar_reset_task(struct work_struct *work)
1930 {
1931         struct gfar_private *priv = container_of(work, struct gfar_private,
1932                         reset_task);
1933         struct net_device *dev = priv->ndev;
1934
1935         if (dev->flags & IFF_UP) {
1936                 netif_tx_stop_all_queues(dev);
1937                 stop_gfar(dev);
1938                 startup_gfar(dev);
1939                 netif_tx_start_all_queues(dev);
1940         }
1941
1942         netif_tx_schedule_all(dev);
1943 }
1944
1945 static void gfar_timeout(struct net_device *dev)
1946 {
1947         struct gfar_private *priv = netdev_priv(dev);
1948
1949         dev->stats.tx_errors++;
1950         schedule_work(&priv->reset_task);
1951 }
1952
1953 /* Interrupt Handler for Transmit complete */
1954 static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
1955 {
1956         struct net_device *dev = tx_queue->dev;
1957         struct gfar_private *priv = netdev_priv(dev);
1958         struct gfar_priv_rx_q *rx_queue = NULL;
1959         struct txbd8 *bdp;
1960         struct txbd8 *lbdp = NULL;
1961         struct txbd8 *base = tx_queue->tx_bd_base;
1962         struct sk_buff *skb;
1963         int skb_dirtytx;
1964         int tx_ring_size = tx_queue->tx_ring_size;
1965         int frags = 0;
1966         int i;
1967         int howmany = 0;
1968         u32 lstatus;
1969
1970         rx_queue = priv->rx_queue[tx_queue->qindex];
1971         bdp = tx_queue->dirty_tx;
1972         skb_dirtytx = tx_queue->skb_dirtytx;
1973
1974         while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
1975                 frags = skb_shinfo(skb)->nr_frags;
1976                 lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
1977
1978                 lstatus = lbdp->lstatus;
1979
1980                 /* Only clean completed frames */
1981                 if ((lstatus & BD_LFLAG(TXBD_READY)) &&
1982                                 (lstatus & BD_LENGTH_MASK))
1983                         break;
1984
1985                 dma_unmap_single(&priv->ofdev->dev,
1986                                 bdp->bufPtr,
1987                                 bdp->length,
1988                                 DMA_TO_DEVICE);
1989
1990                 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
1991                 bdp = next_txbd(bdp, base, tx_ring_size);
1992
1993                 for (i = 0; i < frags; i++) {
1994                         dma_unmap_page(&priv->ofdev->dev,
1995                                         bdp->bufPtr,
1996                                         bdp->length,
1997                                         DMA_TO_DEVICE);
1998                         bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
1999                         bdp = next_txbd(bdp, base, tx_ring_size);
2000                 }
2001
2002                 /*
2003                  * If there's room in the queue (limit it to rx_buffer_size)
2004                  * we add this skb back into the pool, if it's the right size
2005                  */
2006                 if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
2007                                 skb_recycle_check(skb, priv->rx_buffer_size +
2008                                         RXBUF_ALIGNMENT))
2009                         __skb_queue_head(&priv->rx_recycle, skb);
2010                 else
2011                         dev_kfree_skb_any(skb);
2012
2013                 tx_queue->tx_skbuff[skb_dirtytx] = NULL;
2014
2015                 skb_dirtytx = (skb_dirtytx + 1) &
2016                         TX_RING_MOD_MASK(tx_ring_size);
2017
2018                 howmany++;
2019                 tx_queue->num_txbdfree += frags + 1;
2020         }
2021
2022         /* If we freed a buffer, we can restart transmission, if necessary */
2023         if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree)
2024                 netif_wake_subqueue(dev, tx_queue->qindex);
2025
2026         /* Update dirty indicators */
2027         tx_queue->skb_dirtytx = skb_dirtytx;
2028         tx_queue->dirty_tx = bdp;
2029
2030         dev->stats.tx_packets += howmany;
2031
2032         return howmany;
2033 }
2034
2035 static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
2036 {
2037         unsigned long flags;
2038
2039         spin_lock_irqsave(&gfargrp->grplock, flags);
2040         if (napi_schedule_prep(&gfargrp->napi)) {
2041                 gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
2042                 __napi_schedule(&gfargrp->napi);
2043         } else {
2044                 /*
2045                  * Clear IEVENT, so interrupts aren't called again
2046                  * because of the packets that have already arrived.
2047                  */
2048                 gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
2049         }
2050         spin_unlock_irqrestore(&gfargrp->grplock, flags);
2052 }
2053
2054 /* Interrupt Handler for Transmit complete */
2055 static irqreturn_t gfar_transmit(int irq, void *grp_id)
2056 {
2057         gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
2058         return IRQ_HANDLED;
2059 }
2060
2061 static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
2062                 struct sk_buff *skb)
2063 {
2064         struct net_device *dev = rx_queue->dev;
2065         struct gfar_private *priv = netdev_priv(dev);
2066         dma_addr_t buf;
2067
2068         buf = dma_map_single(&priv->ofdev->dev, skb->data,
2069                              priv->rx_buffer_size, DMA_FROM_DEVICE);
2070         gfar_init_rxbdp(rx_queue, bdp, buf);
2071 }
2073
2074 struct sk_buff *gfar_new_skb(struct net_device *dev)
2075 {
2076         unsigned int alignamount;
2077         struct gfar_private *priv = netdev_priv(dev);
2078         struct sk_buff *skb = NULL;
2079
2080         skb = __skb_dequeue(&priv->rx_recycle);
2081         if (!skb)
2082                 skb = netdev_alloc_skb(dev,
2083                                 priv->rx_buffer_size + RXBUF_ALIGNMENT);
2084
2085         if (!skb)
2086                 return NULL;
2087
2088         alignamount = RXBUF_ALIGNMENT -
2089                 (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1));
2090
2091         /* We need the data buffer to be aligned properly.  We will reserve
2092          * as many bytes as needed to align the data properly
2093          */
2094         skb_reserve(skb, alignamount);
2095
2096         return skb;
2097 }
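
/*
 * Alignment sketch, assuming RXBUF_ALIGNMENT is a power of two (64 in
 * gianfar.h): if skb->data lands at an address ending in 0x28, then
 * alignamount = 64 - 0x28 = 24 and skb_reserve(skb, 24) moves data up to
 * the next 64-byte boundary.  If data is already aligned the computation
 * yields a full 64-byte reserve rather than 0, which is harmless since
 * the skb was allocated with RXBUF_ALIGNMENT bytes of slack.
 */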
2098
2099 static inline void count_errors(unsigned short status, struct net_device *dev)
2100 {
2101         struct gfar_private *priv = netdev_priv(dev);
2102         struct net_device_stats *stats = &dev->stats;
2103         struct gfar_extra_stats *estats = &priv->extra_stats;
2104
2105         /* If the packet was truncated, none of the other errors
2106          * matter */
2107         if (status & RXBD_TRUNCATED) {
2108                 stats->rx_length_errors++;
2109
2110                 estats->rx_trunc++;
2111
2112                 return;
2113         }
2114         /* Count the errors, if there were any */
2115         if (status & (RXBD_LARGE | RXBD_SHORT)) {
2116                 stats->rx_length_errors++;
2117
2118                 if (status & RXBD_LARGE)
2119                         estats->rx_large++;
2120                 else
2121                         estats->rx_short++;
2122         }
2123         if (status & RXBD_NONOCTET) {
2124                 stats->rx_frame_errors++;
2125                 estats->rx_nonoctet++;
2126         }
2127         if (status & RXBD_CRCERR) {
2128                 estats->rx_crcerr++;
2129                 stats->rx_crc_errors++;
2130         }
2131         if (status & RXBD_OVERRUN) {
2132                 estats->rx_overrun++;
2133                 stats->rx_crc_errors++;
2134         }
2135 }
2136
2137 irqreturn_t gfar_receive(int irq, void *grp_id)
2138 {
2139         gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
2140         return IRQ_HANDLED;
2141 }
2142
2143 static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2144 {
2145         /* If valid headers were found, and valid sums
2146          * were verified, then we tell the kernel that no
2147          * checksumming is necessary.  Otherwise, it is */
2148         if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
2149                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2150         else
2151                 skb->ip_summed = CHECKSUM_NONE;
2152 }
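
/*
 * A sketch of the flag test above, assuming RXFCB_CSUM_MASK covers the
 * "checksummed" and "checksum error" bits for both IP and TCP/UDP: the
 * equality holds only when both layers were checksummed by hardware and
 * neither error bit is set; anything else falls back to CHECKSUM_NONE so
 * the stack verifies the checksum in software.
 */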
2153
2155 /* gfar_process_frame() -- handle one incoming packet if skb
2156  * isn't NULL.  */
2157 static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2158                               int amount_pull)
2159 {
2160         struct gfar_private *priv = netdev_priv(dev);
2161         struct rxfcb *fcb = NULL;
2162
2163         int ret;
2164
2165         /* The fcb is at the beginning, if it exists */
2166         fcb = (struct rxfcb *)skb->data;
2167
2168         /* Set the queue mapping from the receive queue number in the FCB */
2169         skb_set_queue_mapping(skb, fcb->rq);
2170         /* Remove the FCB and padded bytes, if there are any */
2171         if (amount_pull)
2172                 skb_pull(skb, amount_pull);
2173
2174         if (priv->rx_csum_enable)
2175                 gfar_rx_checksum(skb, fcb);
2176
2177         /* Tell the skb what kind of packet this is */
2178         skb->protocol = eth_type_trans(skb, dev);
2179
2180         /* Send the packet up the stack */
2181         if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
2182                 ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp, fcb->vlctl);
2183         else
2184                 ret = netif_receive_skb(skb);
2185
2186         if (NET_RX_DROP == ret)
2187                 priv->extra_stats.kernel_dropped++;
2188
2189         return 0;
2190 }
2191
2192 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
2193  *   until the budget/quota has been reached. Returns the number
2194  *   of frames handled
2195  */
2196 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2197 {
2198         struct net_device *dev = rx_queue->dev;
2199         struct rxbd8 *bdp, *base;
2200         struct sk_buff *skb;
2201         int pkt_len;
2202         int amount_pull;
2203         int howmany = 0;
2204         struct gfar_private *priv = netdev_priv(dev);
2205
2206         /* Get the first full descriptor */
2207         bdp = rx_queue->cur_rx;
2208         base = rx_queue->rx_bd_base;
2209
2210         amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
2211                 priv->padding;
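
        /* When the controller prepends a frame control block, the first
         * GMAC_FCB_LEN bytes of each buffer hold the FCB rather than the
         * Ethernet header; amount_pull is what gfar_process_frame() strips
         * (FCB plus any configured padding) so the stack sees the frame
         * starting at the destination MAC address. */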
2212
2213         while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
2214                 struct sk_buff *newskb;
2215                 rmb();
2216
2217                 /* Add another skb for the future */
2218                 newskb = gfar_new_skb(dev);
2219
2220                 skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
2221
2222                 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
2223                                 priv->rx_buffer_size, DMA_FROM_DEVICE);
2224
2225                 /* We drop the frame if we failed to allocate a new buffer */
2226                 if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
2227                                  bdp->status & RXBD_ERR)) {
2228                         count_errors(bdp->status, dev);
2229
2230                         if (unlikely(!newskb))
2231                                 newskb = skb;
2232                         else if (skb) {
2233                                 /*
2234                                  * We need to reset ->data to what it
2235                                  * was before gfar_new_skb() re-aligned
2236                                  * it to an RXBUF_ALIGNMENT boundary
2237                                  * before we put the skb back on the
2238                                  * recycle list.
2239                                  */
2240                                 skb->data = skb->head + NET_SKB_PAD;
2241                                 __skb_queue_head(&priv->rx_recycle, skb);
2242                         }
2243                 } else {
2244                         /* Increment the number of packets */
2245                         dev->stats.rx_packets++;
2246                         howmany++;
2247
2248                         if (likely(skb)) {
2249                                 pkt_len = bdp->length - ETH_FCS_LEN;
2250                                 /* Remove the FCS from the packet length */
2251                                 skb_put(skb, pkt_len);
2252                                 dev->stats.rx_bytes += pkt_len;
2253
2254                                 if (in_irq() || irqs_disabled())
2255                                         printk(KERN_ERR "Interrupt problem!\n");
2256                                 gfar_process_frame(dev, skb, amount_pull);
2257
2258                         } else {
2259                                 if (netif_msg_rx_err(priv))
2260                                         printk(KERN_WARNING
2261                                                "%s: Missing skb!\n", dev->name);
2262                                 dev->stats.rx_dropped++;
2263                                 priv->extra_stats.rx_skbmissing++;
2264                         }
2265
2266                 }
2267
2268                 rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
2269
2270                 /* Setup the new bdp */
2271                 gfar_new_rxbdp(rx_queue, bdp, newskb);
2272
2273                 /* Update to the next pointer */
2274                 bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
2275
2276                 /* update to point at the next skb */
2277                 rx_queue->skb_currx =
2278                     (rx_queue->skb_currx + 1) &
2279                     RX_RING_MOD_MASK(rx_queue->rx_ring_size);
2280         }
2281
2282         /* Update the current rxbd pointer to be the next one */
2283         rx_queue->cur_rx = bdp;
2284
2285         return howmany;
2286 }
2287
2288 static int gfar_poll(struct napi_struct *napi, int budget)
2289 {
2290         struct gfar_priv_grp *gfargrp = container_of(napi,
2291                         struct gfar_priv_grp, napi);
2292         struct gfar_private *priv = gfargrp->priv;
2293         struct gfar __iomem *regs = priv->gfargrp.regs;
2294         struct gfar_priv_tx_q *tx_queue = NULL;
2295         struct gfar_priv_rx_q *rx_queue = NULL;
2296         int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
2297         int tx_cleaned = 0, i, left_over_budget = budget;
             unsigned long serviced_queues = 0;
2298         int num_queues = 0;
2299         unsigned long flags;
2300
2301         num_queues = gfargrp->num_rx_queues;
2302         budget_per_queue = budget/num_queues;
2303
2304         /* Clear IEVENT, so interrupts aren't called again
2305          * because of the packets that have already arrived */
2306         gfar_write(&regs->ievent, IEVENT_RTX_MASK);
2307
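        /* Budget redistribution, sketched with assumed numbers: with a
         * budget of 64 and two RX queues, each pass offers every
         * unserviced queue an equal share (32).  If queue 0 consumes only
         * 10 of its 32, the spare 22 goes into left_over_budget and
         * queue 0 is marked in serviced_queues; the next pass then splits
         * the leftover among the queues that used their full share. */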
2308         while (num_queues && left_over_budget) {
2309
2310                 budget_per_queue = left_over_budget/num_queues;
2311                 left_over_budget = 0;
2312
2313                 for_each_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
2314                         if (test_bit(i, &serviced_queues))
2315                                 continue;
2316                         rx_queue = priv->rx_queue[i];
2317                         tx_queue = priv->tx_queue[rx_queue->qindex];
2318
2319                         /* If we fail to get the lock,
2320                          * don't bother with the TX BDs */
2321                         if (spin_trylock_irqsave(&tx_queue->txlock, flags)) {
2322                                 tx_cleaned += gfar_clean_tx_ring(tx_queue);
2323                                 spin_unlock_irqrestore(&tx_queue->txlock,
2324                                                         flags);
2325                         }
2326
2327                         rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
2328                                                         budget_per_queue);
2329                         rx_cleaned += rx_cleaned_per_queue;
2330                         if (rx_cleaned_per_queue < budget_per_queue) {
2331                                 left_over_budget = left_over_budget +
2332                                         (budget_per_queue - rx_cleaned_per_queue);
2333                                 set_bit(i, &serviced_queues);
2334                                 num_queues--;
2335                         }
2336                 }
2337         }
2338
2339         if (tx_cleaned)
2340                 return budget;
2341
2342         if (rx_cleaned < budget) {
2343                 napi_complete(napi);
2344
2345                 /* Clear the halt bit in RSTAT */
2346                 gfar_write(&regs->rstat, gfargrp->rstat);
2347
2348                 gfar_write(&regs->imask, IMASK_DEFAULT);
2349
2350                 /* If we are coalescing interrupts, update the timer */
2351                 /* Otherwise, clear it */
2352                 if (likely(rx_queue->rxcoalescing)) {
2353                         gfar_write(&regs->rxic, 0);
2354                         gfar_write(&regs->rxic, rx_queue->rxic);
2355                 }
2356                 if (likely(tx_queue->txcoalescing)) {
2357                         gfar_write(&regs->txic, 0);
2358                         gfar_write(&regs->txic, tx_queue->txic);
2359                 }
2360         }
2361
2362         return rx_cleaned;
2363 }
2364
2365 #ifdef CONFIG_NET_POLL_CONTROLLER
2366 /*
2367  * Polling 'interrupt' - used by things like netconsole to send skbs
2368  * without having to re-enable interrupts. It's not called while
2369  * the interrupt routine is executing.
2370  */
2371 static void gfar_netpoll(struct net_device *dev)
2372 {
2373         struct gfar_private *priv = netdev_priv(dev);
2374
2375         /* If the device has multiple interrupts, run tx/rx */
2376         if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2377                 disable_irq(priv->gfargrp.interruptTransmit);
2378                 disable_irq(priv->gfargrp.interruptReceive);
2379                 disable_irq(priv->gfargrp.interruptError);
2380                 gfar_interrupt(priv->gfargrp.interruptTransmit, &priv->gfargrp);
2381                 enable_irq(priv->gfargrp.interruptError);
2382                 enable_irq(priv->gfargrp.interruptReceive);
2383                 enable_irq(priv->gfargrp.interruptTransmit);
2384         } else {
2385                 disable_irq(priv->gfargrp.interruptTransmit);
2386                 gfar_interrupt(priv->gfargrp.interruptTransmit, &priv->gfargrp);
2387                 enable_irq(priv->gfargrp.interruptTransmit);
2388         }
2389 }
2390 #endif
2391
2392 /* The interrupt handler for devices with one interrupt */
2393 static irqreturn_t gfar_interrupt(int irq, void *grp_id)
2394 {
2395         struct gfar_priv_grp *gfargrp = grp_id;
2396
2397         /* Save ievent for future reference */
2398         u32 events = gfar_read(&gfargrp->regs->ievent);
2399
2400         /* Check for reception */
2401         if (events & IEVENT_RX_MASK)
2402                 gfar_receive(irq, grp_id);
2403
2404         /* Check for transmit completion */
2405         if (events & IEVENT_TX_MASK)
2406                 gfar_transmit(irq, grp_id);
2407
2408         /* Check for errors */
2409         if (events & IEVENT_ERR_MASK)
2410                 gfar_error(irq, grp_id);
2411
2412         return IRQ_HANDLED;
2413 }
2414
2415 /* Called every time the controller might need to be made
2416  * aware of new link state.  The PHY code conveys this
2417  * information through variables in the phydev structure, and this
2418  * function converts those variables into the appropriate
2419  * register values, and can bring down the device if needed.
2420  */
2421 static void adjust_link(struct net_device *dev)
2422 {
2423         struct gfar_private *priv = netdev_priv(dev);
2424         struct gfar __iomem *regs = priv->gfargrp.regs;
2425         unsigned long flags;
2426         struct phy_device *phydev = priv->phydev;
2427         int new_state = 0;
2428
2429         local_irq_save(flags);
2430         lock_tx_qs(priv);
2431
2432         if (phydev->link) {
2433                 u32 tempval = gfar_read(&regs->maccfg2);
2434                 u32 ecntrl = gfar_read(&regs->ecntrl);
2435
2436                 /* Now we make sure that we can be in full duplex mode.
2437                  * If not, we operate in half-duplex mode. */
2438                 if (phydev->duplex != priv->oldduplex) {
2439                         new_state = 1;
2440                         if (!(phydev->duplex))
2441                                 tempval &= ~(MACCFG2_FULL_DUPLEX);
2442                         else
2443                                 tempval |= MACCFG2_FULL_DUPLEX;
2444
2445                         priv->oldduplex = phydev->duplex;
2446                 }
2447
2448                 if (phydev->speed != priv->oldspeed) {
2449                         new_state = 1;
2450                         switch (phydev->speed) {
2451                         case 1000:
2452                                 tempval =
2453                                     ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
2454
2455                                 ecntrl &= ~(ECNTRL_R100);
2456                                 break;
2457                         case 100:
2458                         case 10:
2459                                 tempval =
2460                                     ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
2461
2462                                 /* Reduced mode distinguishes
2463                                  * between 10 and 100 */
2464                                 if (phydev->speed == SPEED_100)
2465                                         ecntrl |= ECNTRL_R100;
2466                                 else
2467                                         ecntrl &= ~(ECNTRL_R100);
2468                                 break;
2469                         default:
2470                                 if (netif_msg_link(priv))
2471                                         printk(KERN_WARNING
2472                                                 "%s: Ack!  Speed (%d) is not 10/100/1000!\n",
2473                                                 dev->name, phydev->speed);
2474                                 break;
2475                         }
2476
2477                         priv->oldspeed = phydev->speed;
2478                 }
2479
2480                 gfar_write(&regs->maccfg2, tempval);
2481                 gfar_write(&regs->ecntrl, ecntrl);
2482
2483                 if (!priv->oldlink) {
2484                         new_state = 1;
2485                         priv->oldlink = 1;
2486                 }
2487         } else if (priv->oldlink) {
2488                 new_state = 1;
2489                 priv->oldlink = 0;
2490                 priv->oldspeed = 0;
2491                 priv->oldduplex = -1;
2492         }
2493
2494         if (new_state && netif_msg_link(priv))
2495                 phy_print_status(phydev);
2496         unlock_tx_qs(priv);
2497         local_irq_restore(flags);
2498 }
2499
2500 /* Update the hash table based on the current list of multicast
2501  * addresses we subscribe to.  Also, change the promiscuity of
2502  * the device based on the flags (this function is called
2503  * whenever dev->flags is changed */
2504 static void gfar_set_multi(struct net_device *dev)
2505 {
2506         struct dev_mc_list *mc_ptr;
2507         struct gfar_private *priv = netdev_priv(dev);
2508         struct gfar __iomem *regs = priv->gfargrp.regs;
2509         u32 tempval;
2510
2511         if (dev->flags & IFF_PROMISC) {
2512                 /* Set RCTRL to PROM */
2513                 tempval = gfar_read(&regs->rctrl);
2514                 tempval |= RCTRL_PROM;
2515                 gfar_write(&regs->rctrl, tempval);
2516         } else {
2517                 /* Set RCTRL to not PROM */
2518                 tempval = gfar_read(&regs->rctrl);
2519                 tempval &= ~(RCTRL_PROM);
2520                 gfar_write(&regs->rctrl, tempval);
2521         }
2522
2523         if (dev->flags & IFF_ALLMULTI) {
2524                 /* Set the hash to rx all multicast frames */
2525                 gfar_write(&regs->igaddr0, 0xffffffff);
2526                 gfar_write(&regs->igaddr1, 0xffffffff);
2527                 gfar_write(&regs->igaddr2, 0xffffffff);
2528                 gfar_write(&regs->igaddr3, 0xffffffff);
2529                 gfar_write(&regs->igaddr4, 0xffffffff);
2530                 gfar_write(&regs->igaddr5, 0xffffffff);
2531                 gfar_write(&regs->igaddr6, 0xffffffff);
2532                 gfar_write(&regs->igaddr7, 0xffffffff);
2533                 gfar_write(&regs->gaddr0, 0xffffffff);
2534                 gfar_write(&regs->gaddr1, 0xffffffff);
2535                 gfar_write(&regs->gaddr2, 0xffffffff);
2536                 gfar_write(&regs->gaddr3, 0xffffffff);
2537                 gfar_write(&regs->gaddr4, 0xffffffff);
2538                 gfar_write(&regs->gaddr5, 0xffffffff);
2539                 gfar_write(&regs->gaddr6, 0xffffffff);
2540                 gfar_write(&regs->gaddr7, 0xffffffff);
2541         } else {
2542                 int em_num;
2543                 int idx;
2544
2545                 /* zero out the hash */
2546                 gfar_write(&regs->igaddr0, 0x0);
2547                 gfar_write(&regs->igaddr1, 0x0);
2548                 gfar_write(&regs->igaddr2, 0x0);
2549                 gfar_write(&regs->igaddr3, 0x0);
2550                 gfar_write(&regs->igaddr4, 0x0);
2551                 gfar_write(&regs->igaddr5, 0x0);
2552                 gfar_write(&regs->igaddr6, 0x0);
2553                 gfar_write(&regs->igaddr7, 0x0);
2554                 gfar_write(&regs->gaddr0, 0x0);
2555                 gfar_write(&regs->gaddr1, 0x0);
2556                 gfar_write(&regs->gaddr2, 0x0);
2557                 gfar_write(&regs->gaddr3, 0x0);
2558                 gfar_write(&regs->gaddr4, 0x0);
2559                 gfar_write(&regs->gaddr5, 0x0);
2560                 gfar_write(&regs->gaddr6, 0x0);
2561                 gfar_write(&regs->gaddr7, 0x0);
2562
2563                 /* If we have extended hash tables, we need to
2564                  * clear the exact match registers to prepare for
2565                  * setting them */
2566                 if (priv->extended_hash) {
2567                         em_num = GFAR_EM_NUM + 1;
2568                         gfar_clear_exact_match(dev);
2569                         idx = 1;
2570                 } else {
2571                         idx = 0;
2572                         em_num = 0;
2573                 }
2574
2575                 if (dev->mc_count == 0)
2576                         return;
2577
2578                 /* Parse the list, and set the appropriate bits */
2579                 for (mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
2580                         if (idx < em_num) {
2581                                 gfar_set_mac_for_addr(dev, idx,
2582                                                 mc_ptr->dmi_addr);
2583                                 idx++;
2584                         } else
2585                                 gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
2586                 }
2587         }
2590 }
2591
2593 /* Clears each of the exact match registers to zero, so they
2594  * don't interfere with normal reception */
2595 static void gfar_clear_exact_match(struct net_device *dev)
2596 {
2597         int idx;
2598         u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
2599
2600         for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
2601                 gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
2602 }
2603
2604 /* Set the appropriate hash bit for the given addr */
2605 /* The algorithm works like so:
2606  * 1) Take the Destination Address (ie the multicast address), and
2607  * do a CRC on it (little endian), and reverse the bits of the
2608  * result.
2609  * 2) Use the 8 most significant bits as a hash into a 256-entry
2610  * table.  The table is controlled through 8 32-bit registers:
2611  * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
2612  * entry 255.  This means that the 3 most significant bits of the
2613  * hash index select which gaddr register to use, and the 5 other bits
2614  * indicate which bit (assuming an IBM numbering scheme, which
2615  * for PowerPC (tm) is usually the case) in the register holds
2616  * the entry. */
2617 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
2618 {
2619         u32 tempval;
2620         struct gfar_private *priv = netdev_priv(dev);
2621         u32 result = ether_crc(MAC_ADDR_LEN, addr);
2622         int width = priv->hash_width;
2623         u8 whichbit = (result >> (32 - width)) & 0x1f;
2624         u8 whichreg = result >> (32 - width + 5);
2625         u32 value = (1 << (31-whichbit));
2626
2627         tempval = gfar_read(priv->hash_regs[whichreg]);
2628         tempval |= value;
2629         gfar_write(priv->hash_regs[whichreg], tempval);
2632 }
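
/*
 * Worked example with an assumed CRC value: if the top byte of the
 * bit-reversed CRC is 0xa6 (binary 101 00110) and hash_width is 8, then
 * whichreg = binary 101 = 5 and whichbit = binary 00110 = 6, so the code
 * sets 1 << (31 - 6) in hash register 5: bit 6 in the IBM bit-numbering
 * scheme described above.
 */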
2633
2635 /* There are multiple MAC Address register pairs on some controllers
2636  * This function sets the numth pair to a given address
2637  */
2638 static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
2639 {
2640         struct gfar_private *priv = netdev_priv(dev);
2641         struct gfar __iomem *regs = priv->gfargrp.regs;
2642         u32 tempval;
2643         u32 __iomem *macptr = &regs->macstnaddr1;
2644
2645         macptr += num * 2;
2646
2647         /* Now copy it into the mac registers backwards, because */
2648         /* little endian is silly */
2649         tempval = (addr[5] << 24) | (addr[4] << 16) |
2650                   (addr[3] << 8)  |  addr[2];
2651
2652         gfar_write(macptr, tempval);
2653
2654         tempval = (addr[1] << 24) | (addr[0] << 16);
2655
2656         gfar_write(macptr+1, tempval);
2659 }
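
/*
 * Example with an assumed address 00:04:9f:01:02:03: the writes above
 * leave macstnaddr1 = 0x0302019f and macstnaddr2 = 0x04000000, i.e. the
 * address laid out byte-reversed, with the unused low half of
 * macstnaddr2 zeroed.
 */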
2660
2661 /* GFAR error interrupt handler */
2662 static irqreturn_t gfar_error(int irq, void *grp_id)
2663 {
2664         struct gfar_priv_grp *gfargrp = grp_id;
2665         struct gfar __iomem *regs = gfargrp->regs;
2666         struct gfar_private *priv = gfargrp->priv;
2667         struct net_device *dev = priv->ndev;
2668
2669         /* Save ievent for future reference */
2670         u32 events = gfar_read(&regs->ievent);
2671
2672         /* Clear IEVENT */
2673         gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
2674
2675         /* Magic Packet is not an error. */
2676         if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
2677             (events & IEVENT_MAG))
2678                 events &= ~IEVENT_MAG;
2679
2680         /* Log the error details if messaging is enabled */
2681         if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
2682                 printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
2683                        dev->name, events, gfar_read(&regs->imask));
2684
2685         /* Update the error counters */
2686         if (events & IEVENT_TXE) {
2687                 dev->stats.tx_errors++;
2688
2689                 if (events & IEVENT_LC)
2690                         dev->stats.tx_window_errors++;
2691                 if (events & IEVENT_CRL)
2692                         dev->stats.tx_aborted_errors++;
2693                 if (events & IEVENT_XFUN) {
2694                         if (netif_msg_tx_err(priv))
2695                                 printk(KERN_DEBUG "%s: TX FIFO underrun, "
2696                                        "packet dropped.\n", dev->name);
2697                         dev->stats.tx_dropped++;
2698                         priv->extra_stats.tx_underrun++;
2699
2700                         /* Reactivate the Tx Queues */
2701                         gfar_write(&regs->tstat, gfargrp->tstat);
2702                 }
2703                 if (netif_msg_tx_err(priv))
2704                         printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
2705         }
2706         if (events & IEVENT_BSY) {
2707                 dev->stats.rx_errors++;
2708                 priv->extra_stats.rx_bsy++;
2709
2710                 gfar_receive(irq, grp_id);
2711
2712                 if (netif_msg_rx_err(priv))
2713                         printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
2714                                dev->name, gfar_read(&regs->rstat));
2715         }
2716         if (events & IEVENT_BABR) {
2717                 dev->stats.rx_errors++;
2718                 priv->extra_stats.rx_babr++;
2719
2720                 if (netif_msg_rx_err(priv))
2721                         printk(KERN_DEBUG "%s: babbling RX error\n", dev->name);
2722         }
2723         if (events & IEVENT_EBERR) {
2724                 priv->extra_stats.eberr++;
2725                 if (netif_msg_rx_err(priv))
2726                         printk(KERN_DEBUG "%s: bus error\n", dev->name);
2727         }
2728         if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
2729                 printk(KERN_DEBUG "%s: control frame\n", dev->name);
2730
2731         if (events & IEVENT_BABT) {
2732                 priv->extra_stats.tx_babt++;
2733                 if (netif_msg_tx_err(priv))
2734                         printk(KERN_DEBUG "%s: babbling TX error\n", dev->name);
2735         }
2736         return IRQ_HANDLED;
2737 }
2738
2739 static struct of_device_id gfar_match[] =
2740 {
2741         {
2742                 .type = "network",
2743                 .compatible = "gianfar",
2744         },
2745         {},
2746 };
2747 MODULE_DEVICE_TABLE(of, gfar_match);
2748
2749 /* Structure for a device driver */
2750 static struct of_platform_driver gfar_driver = {
2751         .name = "fsl-gianfar",
2752         .match_table = gfar_match,
2753
2754         .probe = gfar_probe,
2755         .remove = gfar_remove,
2756         .suspend = gfar_legacy_suspend,
2757         .resume = gfar_legacy_resume,
2758         .driver.pm = GFAR_PM_OPS,
2759 };
2760
2761 static int __init gfar_init(void)
2762 {
2763         return of_register_platform_driver(&gfar_driver);
2764 }
2765
2766 static void __exit gfar_exit(void)
2767 {
2768         of_unregister_platform_driver(&gfar_driver);
2769 }
2770
2771 module_init(gfar_init);
2772 module_exit(gfar_exit);
2773