drivers/net/qlge/qlge_main.c
1 /*
2  * QLogic qlge NIC HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  * See LICENSE.qlge for copyright and licensing details.
5  * Author:     Linux qlge network device driver by
6  *                      Ron Mercer <ron.mercer@qlogic.com>
7  */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/types.h>
11 #include <linux/module.h>
12 #include <linux/list.h>
13 #include <linux/pci.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/pagemap.h>
16 #include <linux/sched.h>
17 #include <linux/slab.h>
18 #include <linux/dmapool.h>
19 #include <linux/mempool.h>
20 #include <linux/spinlock.h>
21 #include <linux/kthread.h>
22 #include <linux/interrupt.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/ipv6.h>
28 #include <net/ipv6.h>
29 #include <linux/tcp.h>
30 #include <linux/udp.h>
31 #include <linux/if_arp.h>
32 #include <linux/if_ether.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/ethtool.h>
36 #include <linux/skbuff.h>
37 #include <linux/if_vlan.h>
38 #include <linux/delay.h>
39 #include <linux/mm.h>
40 #include <linux/vmalloc.h>
41 #include <net/ip6_checksum.h>
42
43 #include "qlge.h"
44
45 char qlge_driver_name[] = DRV_NAME;
46 const char qlge_driver_version[] = DRV_VERSION;
47
48 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
49 MODULE_DESCRIPTION(DRV_STRING " ");
50 MODULE_LICENSE("GPL");
51 MODULE_VERSION(DRV_VERSION);
52
53 static const u32 default_msg =
54     NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
55 /* NETIF_MSG_TIMER |    */
56     NETIF_MSG_IFDOWN |
57     NETIF_MSG_IFUP |
58     NETIF_MSG_RX_ERR |
59     NETIF_MSG_TX_ERR |
60 /*  NETIF_MSG_TX_QUEUED | */
61 /*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
62 /* NETIF_MSG_PKTDATA | */
63     NETIF_MSG_HW | NETIF_MSG_WOL | 0;
64
65 static int debug = 0x00007fff;  /* defaults above */
66 module_param(debug, int, 0);
67 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
68
69 #define MSIX_IRQ 0
70 #define MSI_IRQ 1
71 #define LEG_IRQ 2
72 static int qlge_irq_type = MSIX_IRQ;
73 module_param(qlge_irq_type, int, 0);
74 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
75
76 static int qlge_mpi_coredump;
77 module_param(qlge_mpi_coredump, int, 0);
78 MODULE_PARM_DESC(qlge_mpi_coredump,
79                 "Option to enable MPI firmware dump. "
80                 "Default is OFF - Do not allocate memory.");
81
82 static int qlge_force_coredump;
83 module_param(qlge_force_coredump, int, 0);
84 MODULE_PARM_DESC(qlge_force_coredump,
85                 "Option to allow force of firmware core dump. "
86                 "Default is OFF - Do not allow.");
87
88 static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
89         {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
90         {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
91         /* required last entry */
92         {0,}
93 };
94
95 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
96
97 /* This hardware semaphore provides exclusive access to
98  * resources shared between the NIC driver, MPI firmware,
99  * FCOE firmware and the FC driver.
100  */
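/* ql_sem_trylock() below returns 0 when the semaphore was acquired and
 * nonzero when another function already holds it.  A typical usage
 * sketch, based on the helpers that follow:
 *
 *      if (ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK))
 *              return -ETIMEDOUT;
 *      ...access the shared MAC address registers...
 *      ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 */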
101 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
102 {
103         u32 sem_bits = 0;
104
105         switch (sem_mask) {
106         case SEM_XGMAC0_MASK:
107                 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
108                 break;
109         case SEM_XGMAC1_MASK:
110                 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
111                 break;
112         case SEM_ICB_MASK:
113                 sem_bits = SEM_SET << SEM_ICB_SHIFT;
114                 break;
115         case SEM_MAC_ADDR_MASK:
116                 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
117                 break;
118         case SEM_FLASH_MASK:
119                 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
120                 break;
121         case SEM_PROBE_MASK:
122                 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
123                 break;
124         case SEM_RT_IDX_MASK:
125                 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
126                 break;
127         case SEM_PROC_REG_MASK:
128                 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
129                 break;
130         default:
131                 netif_alert(qdev, probe, qdev->ndev, "Bad semaphore mask!\n");
132                 return -EINVAL;
133         }
134
135         ql_write32(qdev, SEM, sem_bits | sem_mask);
136         return !(ql_read32(qdev, SEM) & sem_bits);
137 }
138
139 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
140 {
141         unsigned int wait_count = 30;
142         do {
143                 if (!ql_sem_trylock(qdev, sem_mask))
144                         return 0;
145                 udelay(100);
146         } while (--wait_count);
147         return -ETIMEDOUT;
148 }
149
150 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
151 {
152         ql_write32(qdev, SEM, sem_mask);
153         ql_read32(qdev, SEM);   /* flush */
154 }
155
156 /* This function waits for a specific bit to come ready
157  * in a given register.  It is used mostly by the initialization
158  * process, but is also used by netdev callbacks such as
159  * netdev->set_multi, netdev->set_mac_address and netdev->vlan_rx_add_vid.
160  */
161 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
162 {
163         u32 temp;
164         int count = UDELAY_COUNT;
165
166         while (count) {
167                 temp = ql_read32(qdev, reg);
168
169                 /* check for errors */
170                 if (temp & err_bit) {
171                         netif_alert(qdev, probe, qdev->ndev,
172                                     "register 0x%.08x access error, value = 0x%.08x!\n",
173                                     reg, temp);
174                         return -EIO;
175                 } else if (temp & bit)
176                         return 0;
177                 udelay(UDELAY_DELAY);
178                 count--;
179         }
180         netif_alert(qdev, probe, qdev->ndev,
181                     "Timed out waiting for reg %x to come ready.\n", reg);
182         return -ETIMEDOUT;
183 }
184
185 /* The CFG register is used to download TX and RX control blocks
186  * to the chip. This function waits for an operation to complete.
187  */
188 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
189 {
190         int count = UDELAY_COUNT;
191         u32 temp;
192
193         while (count) {
194                 temp = ql_read32(qdev, CFG);
195                 if (temp & CFG_LE)
196                         return -EIO;
197                 if (!(temp & bit))
198                         return 0;
199                 udelay(UDELAY_DELAY);
200                 count--;
201         }
202         return -ETIMEDOUT;
203 }
204
205
206 /* Used to issue init control blocks to hw. Maps control block,
207  * sets address, triggers download, waits for completion.
208  */
209 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
210                  u16 q_id)
211 {
212         u64 map;
213         int status = 0;
214         int direction;
215         u32 mask;
216         u32 value;
217
218         direction =
219             (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
220             PCI_DMA_FROMDEVICE;
221
222         map = pci_map_single(qdev->pdev, ptr, size, direction);
223         if (pci_dma_mapping_error(qdev->pdev, map)) {
224                 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
225                 return -ENOMEM;
226         }
227
228         status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
229         if (status)
230                 return status;
231
232         status = ql_wait_cfg(qdev, bit);
233         if (status) {
234                 netif_err(qdev, ifup, qdev->ndev,
235                           "Timed out waiting for CFG to come ready.\n");
236                 goto exit;
237         }
238
239         ql_write32(qdev, ICB_L, (u32) map);
240         ql_write32(qdev, ICB_H, (u32) (map >> 32));
241
242         mask = CFG_Q_MASK | (bit << 16);
243         value = bit | (q_id << CFG_Q_SHIFT);
244         ql_write32(qdev, CFG, (mask | value));
245
246         /*
247          * Wait for the bit to clear after signaling hw.
248          */
249         status = ql_wait_cfg(qdev, bit);
250 exit:
251         ql_sem_unlock(qdev, SEM_ICB_MASK);      /* does flush too */
252         pci_unmap_single(qdev->pdev, map, size, direction);
253         return status;
254 }
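/* Usage sketch: elsewhere in this driver the completion-queue ICB for an
 * rx_ring is downloaded roughly as
 *      ql_write_cfg(qdev, cqicb, sizeof(struct cqicb), CFG_LCQ, rx_ring->cq_id);
 * with CFG_LCQ selecting the "load CQ" operation and cq_id the target queue.
 */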
255
256 /* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
257 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
258                         u32 *value)
259 {
260         u32 offset = 0;
261         int status;
262
263         switch (type) {
264         case MAC_ADDR_TYPE_MULTI_MAC:
265         case MAC_ADDR_TYPE_CAM_MAC:
266                 {
267                         status =
268                             ql_wait_reg_rdy(qdev,
269                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
270                         if (status)
271                                 goto exit;
272                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
273                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
274                                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
275                         status =
276                             ql_wait_reg_rdy(qdev,
277                                 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
278                         if (status)
279                                 goto exit;
280                         *value++ = ql_read32(qdev, MAC_ADDR_DATA);
281                         status =
282                             ql_wait_reg_rdy(qdev,
283                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
284                         if (status)
285                                 goto exit;
286                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
287                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
288                                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
289                         status =
290                             ql_wait_reg_rdy(qdev,
291                                 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
292                         if (status)
293                                 goto exit;
294                         *value++ = ql_read32(qdev, MAC_ADDR_DATA);
295                         if (type == MAC_ADDR_TYPE_CAM_MAC) {
296                                 status =
297                                     ql_wait_reg_rdy(qdev,
298                                         MAC_ADDR_IDX, MAC_ADDR_MW, 0);
299                                 if (status)
300                                         goto exit;
301                                 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
302                                            (index << MAC_ADDR_IDX_SHIFT) | /* index */
303                                            MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
304                                 status =
305                                     ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
306                                                     MAC_ADDR_MR, 0);
307                                 if (status)
308                                         goto exit;
309                                 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
310                         }
311                         break;
312                 }
313         case MAC_ADDR_TYPE_VLAN:
314         case MAC_ADDR_TYPE_MULTI_FLTR:
315         default:
316                 netif_crit(qdev, ifup, qdev->ndev,
317                            "Address type %d not yet supported.\n", type);
318                 status = -EPERM;
319         }
320 exit:
321         return status;
322 }
323
324 /* Set up a MAC, multicast or VLAN address for the
325  * inbound frame matching.
326  */
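/* Layout sketch of what the CAM_MAC case below programs through the
 * MAC_ADDR_IDX/MAC_ADDR_DATA register pair: the low four octets of the
 * address, then the high two octets, then a routing word (cam_output)
 * carrying the function, the CQ id and, when VLANs are active, CAM_OUT_RV.
 */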
327 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
328                                u16 index)
329 {
330         u32 offset = 0;
331         int status = 0;
332
333         switch (type) {
334         case MAC_ADDR_TYPE_MULTI_MAC:
335                 {
336                         u32 upper = (addr[0] << 8) | addr[1];
337                         u32 lower = (addr[2] << 24) | (addr[3] << 16) |
338                                         (addr[4] << 8) | (addr[5]);
339
340                         status =
341                                 ql_wait_reg_rdy(qdev,
342                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
343                         if (status)
344                                 goto exit;
345                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
346                                 (index << MAC_ADDR_IDX_SHIFT) |
347                                 type | MAC_ADDR_E);
348                         ql_write32(qdev, MAC_ADDR_DATA, lower);
349                         status =
350                                 ql_wait_reg_rdy(qdev,
351                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
352                         if (status)
353                                 goto exit;
354                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
355                                 (index << MAC_ADDR_IDX_SHIFT) |
356                                 type | MAC_ADDR_E);
357
358                         ql_write32(qdev, MAC_ADDR_DATA, upper);
359                         status =
360                                 ql_wait_reg_rdy(qdev,
361                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
362                         if (status)
363                                 goto exit;
364                         break;
365                 }
366         case MAC_ADDR_TYPE_CAM_MAC:
367                 {
368                         u32 cam_output;
369                         u32 upper = (addr[0] << 8) | addr[1];
370                         u32 lower =
371                             (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
372                             (addr[5]);
373
374                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
375                                      "Adding %s address %pM at index %d in the CAM.\n",
376                                      type == MAC_ADDR_TYPE_MULTI_MAC ?
377                                      "MULTICAST" : "UNICAST",
378                                      addr, index);
379
380                         status =
381                             ql_wait_reg_rdy(qdev,
382                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
383                         if (status)
384                                 goto exit;
385                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
386                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
387                                    type);       /* type */
388                         ql_write32(qdev, MAC_ADDR_DATA, lower);
389                         status =
390                             ql_wait_reg_rdy(qdev,
391                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
392                         if (status)
393                                 goto exit;
394                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
395                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
396                                    type);       /* type */
397                         ql_write32(qdev, MAC_ADDR_DATA, upper);
398                         status =
399                             ql_wait_reg_rdy(qdev,
400                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
401                         if (status)
402                                 goto exit;
403                         ql_write32(qdev, MAC_ADDR_IDX, (offset) |       /* offset */
404                                    (index << MAC_ADDR_IDX_SHIFT) |      /* index */
405                                    type);       /* type */
406                         /* This field should also include the queue id
407                            and possibly the function id.  Right now we hardcode
408                            the route field to NIC core.
409                          */
410                         cam_output = (CAM_OUT_ROUTE_NIC |
411                                       (qdev->
412                                        func << CAM_OUT_FUNC_SHIFT) |
413                                         (0 << CAM_OUT_CQ_ID_SHIFT));
414                         if (qdev->vlgrp)
415                                 cam_output |= CAM_OUT_RV;
416                         /* route to NIC core */
417                         ql_write32(qdev, MAC_ADDR_DATA, cam_output);
418                         break;
419                 }
420         case MAC_ADDR_TYPE_VLAN:
421                 {
422                         u32 enable_bit = *((u32 *) &addr[0]);
423                         /* For VLAN, the addr actually holds a bit that
424                          * either enables or disables the VLAN ID we are
425                          * addressing: the MAC_ADDR_E bit (bit 27) is
426                          * either set or cleared.
427                          */
428                         netif_info(qdev, ifup, qdev->ndev,
429                                    "%s VLAN ID %d %s the CAM.\n",
430                                    enable_bit ? "Adding" : "Removing",
431                                    index,
432                                    enable_bit ? "to" : "from");
433
434                         status =
435                             ql_wait_reg_rdy(qdev,
436                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
437                         if (status)
438                                 goto exit;
439                         ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
440                                    (index << MAC_ADDR_IDX_SHIFT) |      /* index */
441                                    type |       /* type */
442                                    enable_bit); /* enable/disable */
443                         break;
444                 }
445         case MAC_ADDR_TYPE_MULTI_FLTR:
446         default:
447                 netif_crit(qdev, ifup, qdev->ndev,
448                            "Address type %d not yet supported.\n", type);
449                 status = -EPERM;
450         }
451 exit:
452         return status;
453 }
454
455 /* Set or clear MAC address in hardware. We sometimes
456  * have to clear it to prevent wrong frame routing
457  * especially in a bonding environment.
458  */
459 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
460 {
461         int status;
462         char zero_mac_addr[ETH_ALEN];
463         char *addr;
464
465         if (set) {
466                 addr = &qdev->ndev->dev_addr[0];
467                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
468                              "Set Mac addr %pM\n", addr);
469         } else {
470                 memset(zero_mac_addr, 0, ETH_ALEN);
471                 addr = &zero_mac_addr[0];
472                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
473                              "Clearing MAC address\n");
474         }
475         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
476         if (status)
477                 return status;
478         status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
479                         MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
480         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
481         if (status)
482                 netif_err(qdev, ifup, qdev->ndev,
483                           "Failed to init mac address.\n");
484         return status;
485 }
486
487 void ql_link_on(struct ql_adapter *qdev)
488 {
489         netif_err(qdev, link, qdev->ndev, "Link is up.\n");
490         netif_carrier_on(qdev->ndev);
491         ql_set_mac_addr(qdev, 1);
492 }
493
494 void ql_link_off(struct ql_adapter *qdev)
495 {
496         netif_err(qdev, link, qdev->ndev, "Link is down.\n");
497         netif_carrier_off(qdev->ndev);
498         ql_set_mac_addr(qdev, 0);
499 }
500
501 /* Get a specific frame routing value from the CAM.
502  * Used for debug and reg dump.
503  */
504 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
505 {
506         int status = 0;
507
508         status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
509         if (status)
510                 goto exit;
511
512         ql_write32(qdev, RT_IDX,
513                    RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
514         status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
515         if (status)
516                 goto exit;
517         *value = ql_read32(qdev, RT_DATA);
518 exit:
519         return status;
520 }
521
522 /* The NIC function for this chip has 16 routing indexes.  Each one can be used
523  * to route different frame types to various inbound queues.  We send broadcast/
524  * multicast/error frames to the default queue for slow handling,
525  * and CAM hit/RSS frames to the fast handling queues.
526  */
527 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
528                               int enable)
529 {
530         int status = -EINVAL; /* Return error if no mask match. */
531         u32 value = 0;
532
533         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
534                      "%s %s mask %s the routing reg.\n",
535                      enable ? "Adding" : "Removing",
536                      index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
537                      index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
538                      index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
539                      index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
540                      index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
541                      index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
542                      index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
543                      index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
544                      index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
545                      index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
546                      index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
547                      index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
548                      index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
549                      index == RT_IDX_UNUSED013 ? "UNUSED13" :
550                      index == RT_IDX_UNUSED014 ? "UNUSED14" :
551                      index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
552                      "(Bad index != RT_IDX)",
553                      enable ? "to" : "from");
554
555         switch (mask) {
556         case RT_IDX_CAM_HIT:
557                 {
558                         value = RT_IDX_DST_CAM_Q |      /* dest */
559                             RT_IDX_TYPE_NICQ |  /* type */
560                             (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
561                         break;
562                 }
563         case RT_IDX_VALID:      /* Promiscuous Mode frames. */
564                 {
565                         value = RT_IDX_DST_DFLT_Q |     /* dest */
566                             RT_IDX_TYPE_NICQ |  /* type */
567                             (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
568                         break;
569                 }
570         case RT_IDX_ERR:        /* Pass up MAC,IP,TCP/UDP error frames. */
571                 {
572                         value = RT_IDX_DST_DFLT_Q |     /* dest */
573                             RT_IDX_TYPE_NICQ |  /* type */
574                             (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
575                         break;
576                 }
577         case RT_IDX_BCAST:      /* Pass up Broadcast frames to default Q. */
578                 {
579                         value = RT_IDX_DST_DFLT_Q |     /* dest */
580                             RT_IDX_TYPE_NICQ |  /* type */
581                             (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
582                         break;
583                 }
584         case RT_IDX_MCAST:      /* Pass up All Multicast frames. */
585                 {
586                         value = RT_IDX_DST_DFLT_Q |     /* dest */
587                             RT_IDX_TYPE_NICQ |  /* type */
588                             (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
589                         break;
590                 }
591         case RT_IDX_MCAST_MATCH:        /* Pass up matched Multicast frames. */
592                 {
593                         value = RT_IDX_DST_DFLT_Q |     /* dest */
594                             RT_IDX_TYPE_NICQ |  /* type */
595                             (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
596                         break;
597                 }
598         case RT_IDX_RSS_MATCH:  /* Pass up matched RSS frames. */
599                 {
600                         value = RT_IDX_DST_RSS |        /* dest */
601                             RT_IDX_TYPE_NICQ |  /* type */
602                             (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
603                         break;
604                 }
605         case 0:         /* Clear the E-bit on an entry. */
606                 {
607                         value = RT_IDX_DST_DFLT_Q |     /* dest */
608                             RT_IDX_TYPE_NICQ |  /* type */
609                             (index << RT_IDX_IDX_SHIFT);/* index */
610                         break;
611                 }
612         default:
613                 netif_err(qdev, ifup, qdev->ndev,
614                           "Mask type %d not yet supported.\n", mask);
615                 status = -EPERM;
616                 goto exit;
617         }
618
619         if (value) {
620                 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
621                 if (status)
622                         goto exit;
623                 value |= (enable ? RT_IDX_E : 0);
624                 ql_write32(qdev, RT_IDX, value);
625                 ql_write32(qdev, RT_DATA, enable ? mask : 0);
626         }
627 exit:
628         return status;
629 }
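
/* Example usage (a sketch of how these slots are typically programmed by
 * the routing-initialization code in this driver):
 *
 *      ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
 *      ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT, RT_IDX_CAM_HIT, 1);
 *
 * These enable routing of broadcast frames to the default queue and of
 * CAM-hit frames to the completion queue bound to the matching CAM entry.
 */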
630
631 static void ql_enable_interrupts(struct ql_adapter *qdev)
632 {
633         ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
634 }
635
636 static void ql_disable_interrupts(struct ql_adapter *qdev)
637 {
638         ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
639 }
640
641 /* If we're running with multiple MSI-X vectors then we enable on the fly.
642  * Otherwise, we may have multiple outstanding workers and don't want to
643  * enable until the last one finishes. In this case, the irq_cnt gets
644  * incremented every time we queue a worker and decremented every time
645  * a worker finishes.  Once it hits zero we enable the interrupt.
646  */
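/* Concretely, as wired up by the functions below: irq_cnt is precharged
 * to 1 in ql_enable_all_completion_interrupts(), each
 * ql_disable_completion_interrupt() call increments it, and
 * ql_enable_completion_interrupt() re-enables the hardware interrupt only
 * when its atomic_dec_and_test() brings the count back to zero.
 */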
647 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
648 {
649         u32 var = 0;
650         unsigned long hw_flags = 0;
651         struct intr_context *ctx = qdev->intr_context + intr;
652
653         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
654                 /* Always enable if we're running multiple MSI-X vectors
655                  * and it's not the default (zeroth) interrupt.
656                  */
657                 ql_write32(qdev, INTR_EN,
658                            ctx->intr_en_mask);
659                 var = ql_read32(qdev, STS);
660                 return var;
661         }
662
663         spin_lock_irqsave(&qdev->hw_lock, hw_flags);
664         if (atomic_dec_and_test(&ctx->irq_cnt)) {
665                 ql_write32(qdev, INTR_EN,
666                            ctx->intr_en_mask);
667                 var = ql_read32(qdev, STS);
668         }
669         spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
670         return var;
671 }
672
673 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
674 {
675         u32 var = 0;
676         struct intr_context *ctx;
677
678         /* HW disables for us if we're running multiple MSI-X vectors
679          * and it's not the default (zeroth) interrupt.
680          */
681         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
682                 return 0;
683
684         ctx = qdev->intr_context + intr;
685         spin_lock(&qdev->hw_lock);
686         if (!atomic_read(&ctx->irq_cnt)) {
687                 ql_write32(qdev, INTR_EN,
688                 ctx->intr_dis_mask);
689                 var = ql_read32(qdev, STS);
690         }
691         atomic_inc(&ctx->irq_cnt);
692         spin_unlock(&qdev->hw_lock);
693         return var;
694 }
695
696 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
697 {
698         int i;
699         for (i = 0; i < qdev->intr_count; i++) {
700                 /* The enable call does an atomic_dec_and_test
701                  * and enables only if the result is zero.
702                  * So we precharge it here.
703                  */
704                 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
705                         i == 0))
706                         atomic_set(&qdev->intr_context[i].irq_cnt, 1);
707                 ql_enable_completion_interrupt(qdev, i);
708         }
709
710 }
711
712 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
713 {
714         int status, i;
715         u16 csum = 0;
716         __le16 *flash = (__le16 *)&qdev->flash;
717
718         status = strncmp((char *)&qdev->flash, str, 4);
719         if (status) {
720                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
721                 return  status;
722         }
723
724         for (i = 0; i < size; i++)
725                 csum += le16_to_cpu(*flash++);
726
727         if (csum)
728                 netif_err(qdev, ifup, qdev->ndev,
729                           "Invalid flash checksum, csum = 0x%.04x.\n", csum);
730
731         return csum;
732 }
733
734 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
735 {
736         int status = 0;
737         /* wait for reg to come ready */
738         status = ql_wait_reg_rdy(qdev,
739                         FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
740         if (status)
741                 goto exit;
742         /* set up for reg read */
743         ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
744         /* wait for reg to come ready */
745         status = ql_wait_reg_rdy(qdev,
746                         FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
747         if (status)
748                 goto exit;
749         /* This data is stored on flash as an array of
750          * __le32.  Since ql_read32() returns CPU-endian values,
751          * we need to swap it back.
752          */
753         *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
754 exit:
755         return status;
756 }
757
758 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
759 {
760         u32 i, size;
761         int status;
762         __le32 *p = (__le32 *)&qdev->flash;
763         u32 offset;
764         u8 mac_addr[6];
765
766         /* Get flash offset for function and adjust
767          * for dword access.
768          */
769         if (!qdev->port)
770                 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
771         else
772                 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
773
774         if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
775                 return -ETIMEDOUT;
776
777         size = sizeof(struct flash_params_8000) / sizeof(u32);
778         for (i = 0; i < size; i++, p++) {
779                 status = ql_read_flash_word(qdev, i+offset, p);
780                 if (status) {
781                         netif_err(qdev, ifup, qdev->ndev,
782                                   "Error reading flash.\n");
783                         goto exit;
784                 }
785         }
786
787         status = ql_validate_flash(qdev,
788                         sizeof(struct flash_params_8000) / sizeof(u16),
789                         "8000");
790         if (status) {
791                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
792                 status = -EINVAL;
793                 goto exit;
794         }
795
796         /* Extract either manufacturer or BOFM modified
797          * MAC address.
798          */
799         if (qdev->flash.flash_params_8000.data_type1 == 2)
800                 memcpy(mac_addr,
801                         qdev->flash.flash_params_8000.mac_addr1,
802                         qdev->ndev->addr_len);
803         else
804                 memcpy(mac_addr,
805                         qdev->flash.flash_params_8000.mac_addr,
806                         qdev->ndev->addr_len);
807
808         if (!is_valid_ether_addr(mac_addr)) {
809                 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
810                 status = -EINVAL;
811                 goto exit;
812         }
813
814         memcpy(qdev->ndev->dev_addr,
815                 mac_addr,
816                 qdev->ndev->addr_len);
817
818 exit:
819         ql_sem_unlock(qdev, SEM_FLASH_MASK);
820         return status;
821 }
822
823 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
824 {
825         int i;
826         int status;
827         __le32 *p = (__le32 *)&qdev->flash;
828         u32 offset = 0;
829         u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
830
831         /* Second function's parameters follow the first
832          * function's.
833          */
834         if (qdev->port)
835                 offset = size;
836
837         if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
838                 return -ETIMEDOUT;
839
840         for (i = 0; i < size; i++, p++) {
841                 status = ql_read_flash_word(qdev, i+offset, p);
842                 if (status) {
843                         netif_err(qdev, ifup, qdev->ndev,
844                                   "Error reading flash.\n");
845                         goto exit;
846                 }
847
848         }
849
850         status = ql_validate_flash(qdev,
851                         sizeof(struct flash_params_8012) / sizeof(u16),
852                         "8012");
853         if (status) {
854                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
855                 status = -EINVAL;
856                 goto exit;
857         }
858
859         if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
860                 status = -EINVAL;
861                 goto exit;
862         }
863
864         memcpy(qdev->ndev->dev_addr,
865                 qdev->flash.flash_params_8012.mac_addr,
866                 qdev->ndev->addr_len);
867
868 exit:
869         ql_sem_unlock(qdev, SEM_FLASH_MASK);
870         return status;
871 }
872
873 /* xgmac registers are located behind the xgmac_addr and xgmac_data
874  * register pair.  Each read/write requires us to wait for the ready
875  * bit before reading/writing the data.
876  */
877 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
878 {
879         int status;
880         /* wait for reg to come ready */
881         status = ql_wait_reg_rdy(qdev,
882                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
883         if (status)
884                 return status;
885         /* write the data to the data reg */
886         ql_write32(qdev, XGMAC_DATA, data);
887         /* trigger the write */
888         ql_write32(qdev, XGMAC_ADDR, reg);
889         return status;
890 }
891
892 /* xgmac registers are located behind the xgmac_addr and xgmac_data
893  * register pair.  Each read/write requires us to wait for the ready
894  * bit before reading/writing the data.
895  */
896 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
897 {
898         int status = 0;
899         /* wait for reg to come ready */
900         status = ql_wait_reg_rdy(qdev,
901                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
902         if (status)
903                 goto exit;
904         /* set up for reg read */
905         ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
906         /* wait for reg to come ready */
907         status = ql_wait_reg_rdy(qdev,
908                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
909         if (status)
910                 goto exit;
911         /* get the data */
912         *data = ql_read32(qdev, XGMAC_DATA);
913 exit:
914         return status;
915 }
916
917 /* This is used for reading the 64-bit statistics regs. */
918 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
919 {
920         int status = 0;
921         u32 hi = 0;
922         u32 lo = 0;
923
924         status = ql_read_xgmac_reg(qdev, reg, &lo);
925         if (status)
926                 goto exit;
927
928         status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
929         if (status)
930                 goto exit;
931
932         *data = (u64) lo | ((u64) hi << 32);
933
934 exit:
935         return status;
936 }
937
938 static int ql_8000_port_initialize(struct ql_adapter *qdev)
939 {
940         int status;
941         /*
942          * Get MPI firmware version for driver banner
943          * and ethtool info.
944          */
945         status = ql_mb_about_fw(qdev);
946         if (status)
947                 goto exit;
948         status = ql_mb_get_fw_state(qdev);
949         if (status)
950                 goto exit;
951         /* Wake up a worker to get/set the TX/RX frame sizes. */
952         queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
953 exit:
954         return status;
955 }
956
957 /* Take the MAC Core out of reset.
958  * Enable statistics counting.
959  * Take the transmitter/receiver out of reset.
960  * This functionality may be done in the MPI firmware at a
961  * later date.
962  */
963 static int ql_8012_port_initialize(struct ql_adapter *qdev)
964 {
965         int status = 0;
966         u32 data;
967
968         if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
969                 /* Another function has the semaphore, so
970                  * wait for the port init bit to come ready.
971                  */
972                 netif_info(qdev, link, qdev->ndev,
973                            "Another function has the semaphore, so wait for the port init bit to come ready.\n");
974                 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
975                 if (status) {
976                         netif_crit(qdev, link, qdev->ndev,
977                                    "Port initialize timed out.\n");
978                 }
979                 return status;
980         }
981
982         netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
983         /* Set the core reset. */
984         status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
985         if (status)
986                 goto end;
987         data |= GLOBAL_CFG_RESET;
988         status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
989         if (status)
990                 goto end;
991
992         /* Clear the core reset and turn on jumbo for receiver. */
993         data &= ~GLOBAL_CFG_RESET;      /* Clear core reset. */
994         data |= GLOBAL_CFG_JUMBO;       /* Turn on jumbo. */
995         data |= GLOBAL_CFG_TX_STAT_EN;
996         data |= GLOBAL_CFG_RX_STAT_EN;
997         status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
998         if (status)
999                 goto end;
1000
1001         /* Enable the transmitter and clear its reset. */
1002         status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
1003         if (status)
1004                 goto end;
1005         data &= ~TX_CFG_RESET;  /* Clear the TX MAC reset. */
1006         data |= TX_CFG_EN;      /* Enable the transmitter. */
1007         status = ql_write_xgmac_reg(qdev, TX_CFG, data);
1008         if (status)
1009                 goto end;
1010
1011         /* Enable the receiver and clear its reset. */
1012         status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1013         if (status)
1014                 goto end;
1015         data &= ~RX_CFG_RESET;  /* Clear the RX MAC reset. */
1016         data |= RX_CFG_EN;      /* Enable the receiver. */
1017         status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1018         if (status)
1019                 goto end;
1020
1021         /* Turn on jumbo. */
1022         status =
1023             ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1024         if (status)
1025                 goto end;
1026         status =
1027             ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1028         if (status)
1029                 goto end;
1030
1031         /* Signal to the world that the port is enabled. */
1032         ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1033 end:
1034         ql_sem_unlock(qdev, qdev->xg_sem_mask);
1035         return status;
1036 }
1037
1038 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1039 {
1040         return PAGE_SIZE << qdev->lbq_buf_order;
1041 }
1042
1043 /* Get the next large buffer. */
1044 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1045 {
1046         struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1047         rx_ring->lbq_curr_idx++;
1048         if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1049                 rx_ring->lbq_curr_idx = 0;
1050         rx_ring->lbq_free_cnt++;
1051         return lbq_desc;
1052 }
1053
1054 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1055                 struct rx_ring *rx_ring)
1056 {
1057         struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1058
1059         pci_dma_sync_single_for_cpu(qdev->pdev,
1060                                         pci_unmap_addr(lbq_desc, mapaddr),
1061                                     rx_ring->lbq_buf_size,
1062                                         PCI_DMA_FROMDEVICE);
1063
1064         /* If it's the last chunk of our master page then
1065          * we unmap it.
1066          */
1067         if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1068                                         == ql_lbq_block_size(qdev))
1069                 pci_unmap_page(qdev->pdev,
1070                                 lbq_desc->p.pg_chunk.map,
1071                                 ql_lbq_block_size(qdev),
1072                                 PCI_DMA_FROMDEVICE);
1073         return lbq_desc;
1074 }
1075
1076 /* Get the next small buffer. */
1077 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1078 {
1079         struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1080         rx_ring->sbq_curr_idx++;
1081         if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1082                 rx_ring->sbq_curr_idx = 0;
1083         rx_ring->sbq_free_cnt++;
1084         return sbq_desc;
1085 }
1086
1087 /* Update an rx ring index. */
1088 static void ql_update_cq(struct rx_ring *rx_ring)
1089 {
1090         rx_ring->cnsmr_idx++;
1091         rx_ring->curr_entry++;
1092         if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1093                 rx_ring->cnsmr_idx = 0;
1094                 rx_ring->curr_entry = rx_ring->cq_base;
1095         }
1096 }
1097
1098 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1099 {
1100         ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1101 }
1102
1103 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1104                                                 struct bq_desc *lbq_desc)
1105 {
1106         if (!rx_ring->pg_chunk.page) {
1107                 u64 map;
1108                 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1109                                                 GFP_ATOMIC,
1110                                                 qdev->lbq_buf_order);
1111                 if (unlikely(!rx_ring->pg_chunk.page)) {
1112                         netif_err(qdev, drv, qdev->ndev,
1113                                   "page allocation failed.\n");
1114                         return -ENOMEM;
1115                 }
1116                 rx_ring->pg_chunk.offset = 0;
1117                 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1118                                         0, ql_lbq_block_size(qdev),
1119                                         PCI_DMA_FROMDEVICE);
1120                 if (pci_dma_mapping_error(qdev->pdev, map)) {
1121                         __free_pages(rx_ring->pg_chunk.page,
1122                                         qdev->lbq_buf_order);
1123                         netif_err(qdev, drv, qdev->ndev,
1124                                   "PCI mapping failed.\n");
1125                         return -ENOMEM;
1126                 }
1127                 rx_ring->pg_chunk.map = map;
1128                 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1129         }
1130
1131         /* Copy the current master pg_chunk info
1132          * to the current descriptor.
1133          */
1134         lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1135
1136         /* Adjust the master page chunk for next
1137          * buffer get.
1138          */
1139         rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1140         if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1141                 rx_ring->pg_chunk.page = NULL;
1142                 lbq_desc->p.pg_chunk.last_flag = 1;
1143         } else {
1144                 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1145                 get_page(rx_ring->pg_chunk.page);
1146                 lbq_desc->p.pg_chunk.last_flag = 0;
1147         }
1148         return 0;
1149 }
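
/* Lifecycle sketch of the "master page" handled above: one page of order
 * lbq_buf_order is allocated and mapped once, then handed out in
 * lbq_buf_size chunks.  get_page() takes an extra reference for every
 * chunk except the last, and ql_get_curr_lchunk() unmaps the page when
 * the chunk that ends the page is consumed.
 */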
1150 /* Process (refill) a large buffer queue. */
1151 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1152 {
1153         u32 clean_idx = rx_ring->lbq_clean_idx;
1154         u32 start_idx = clean_idx;
1155         struct bq_desc *lbq_desc;
1156         u64 map;
1157         int i;
1158
1159         while (rx_ring->lbq_free_cnt > 32) {
1160                 for (i = 0; i < 16; i++) {
1161                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1162                                      "lbq: try cleaning clean_idx = %d.\n",
1163                                      clean_idx);
1164                         lbq_desc = &rx_ring->lbq[clean_idx];
1165                         if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1166                                 netif_err(qdev, ifup, qdev->ndev,
1167                                           "Could not get a page chunk.\n");
1168                                 return;
1169                         }
1170
1171                         map = lbq_desc->p.pg_chunk.map +
1172                                 lbq_desc->p.pg_chunk.offset;
1173                         pci_unmap_addr_set(lbq_desc, mapaddr, map);
1174                         pci_unmap_len_set(lbq_desc, maplen,
1175                                         rx_ring->lbq_buf_size);
1176                         *lbq_desc->addr = cpu_to_le64(map);
1177
1178                         pci_dma_sync_single_for_device(qdev->pdev, map,
1179                                                 rx_ring->lbq_buf_size,
1180                                                 PCI_DMA_FROMDEVICE);
1181                         clean_idx++;
1182                         if (clean_idx == rx_ring->lbq_len)
1183                                 clean_idx = 0;
1184                 }
1185
1186                 rx_ring->lbq_clean_idx = clean_idx;
1187                 rx_ring->lbq_prod_idx += 16;
1188                 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1189                         rx_ring->lbq_prod_idx = 0;
1190                 rx_ring->lbq_free_cnt -= 16;
1191         }
1192
1193         if (start_idx != clean_idx) {
1194                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1195                              "lbq: updating prod idx = %d.\n",
1196                              rx_ring->lbq_prod_idx);
1197                 ql_write_db_reg(rx_ring->lbq_prod_idx,
1198                                 rx_ring->lbq_prod_idx_db_reg);
1199         }
1200 }
1201
1202 /* Process (refill) a small buffer queue. */
1203 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1204 {
1205         u32 clean_idx = rx_ring->sbq_clean_idx;
1206         u32 start_idx = clean_idx;
1207         struct bq_desc *sbq_desc;
1208         u64 map;
1209         int i;
1210
1211         while (rx_ring->sbq_free_cnt > 16) {
1212                 for (i = 0; i < 16; i++) {
1213                         sbq_desc = &rx_ring->sbq[clean_idx];
1214                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1215                                      "sbq: try cleaning clean_idx = %d.\n",
1216                                      clean_idx);
1217                         if (sbq_desc->p.skb == NULL) {
1218                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1219                                              qdev->ndev,
1220                                              "sbq: getting new skb for index %d.\n",
1221                                              sbq_desc->index);
1222                                 sbq_desc->p.skb =
1223                                     netdev_alloc_skb(qdev->ndev,
1224                                                      SMALL_BUFFER_SIZE);
1225                                 if (sbq_desc->p.skb == NULL) {
1226                                         netif_err(qdev, probe, qdev->ndev,
1227                                                   "Couldn't get an skb.\n");
1228                                         rx_ring->sbq_clean_idx = clean_idx;
1229                                         return;
1230                                 }
1231                                 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1232                                 map = pci_map_single(qdev->pdev,
1233                                                      sbq_desc->p.skb->data,
1234                                                      rx_ring->sbq_buf_size,
1235                                                      PCI_DMA_FROMDEVICE);
1236                                 if (pci_dma_mapping_error(qdev->pdev, map)) {
1237                                         netif_err(qdev, ifup, qdev->ndev,
1238                                                   "PCI mapping failed.\n");
1239                                         rx_ring->sbq_clean_idx = clean_idx;
1240                                         dev_kfree_skb_any(sbq_desc->p.skb);
1241                                         sbq_desc->p.skb = NULL;
1242                                         return;
1243                                 }
1244                                 pci_unmap_addr_set(sbq_desc, mapaddr, map);
1245                                 pci_unmap_len_set(sbq_desc, maplen,
1246                                                   rx_ring->sbq_buf_size);
1247                                 *sbq_desc->addr = cpu_to_le64(map);
1248                         }
1249
1250                         clean_idx++;
1251                         if (clean_idx == rx_ring->sbq_len)
1252                                 clean_idx = 0;
1253                 }
1254                 rx_ring->sbq_clean_idx = clean_idx;
1255                 rx_ring->sbq_prod_idx += 16;
1256                 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1257                         rx_ring->sbq_prod_idx = 0;
1258                 rx_ring->sbq_free_cnt -= 16;
1259         }
1260
1261         if (start_idx != clean_idx) {
1262                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1263                              "sbq: updating prod idx = %d.\n",
1264                              rx_ring->sbq_prod_idx);
1265                 ql_write_db_reg(rx_ring->sbq_prod_idx,
1266                                 rx_ring->sbq_prod_idx_db_reg);
1267         }
1268 }
1269
1270 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1271                                     struct rx_ring *rx_ring)
1272 {
1273         ql_update_sbq(qdev, rx_ring);
1274         ql_update_lbq(qdev, rx_ring);
1275 }
1276
1277 /* Unmaps tx buffers.  Can be called from send() if a pci mapping
1278  * fails at some stage, or from the interrupt when a tx completes.
1279  */
1280 static void ql_unmap_send(struct ql_adapter *qdev,
1281                           struct tx_ring_desc *tx_ring_desc, int mapped)
1282 {
1283         int i;
1284         for (i = 0; i < mapped; i++) {
1285                 if (i == 0 || (i == 7 && mapped > 7)) {
1286                         /*
1287                          * Unmap the skb->data area, or the
1288                          * external sglist (AKA the Outbound
1289                          * Address List (OAL)).
1290                          * If it's the zeroth element, then it's
1291                          * the skb->data area.  If it's the 7th
1292                          * element and there are more than 6 frags,
1293                          * then it's an OAL.
1294                          */
1295                         if (i == 7) {
1296                                 netif_printk(qdev, tx_done, KERN_DEBUG,
1297                                              qdev->ndev,
1298                                              "unmapping OAL area.\n");
1299                         }
1300                         pci_unmap_single(qdev->pdev,
1301                                          pci_unmap_addr(&tx_ring_desc->map[i],
1302                                                         mapaddr),
1303                                          pci_unmap_len(&tx_ring_desc->map[i],
1304                                                        maplen),
1305                                          PCI_DMA_TODEVICE);
1306                 } else {
1307                         netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1308                                      "unmapping frag %d.\n", i);
1309                         pci_unmap_page(qdev->pdev,
1310                                        pci_unmap_addr(&tx_ring_desc->map[i],
1311                                                       mapaddr),
1312                                        pci_unmap_len(&tx_ring_desc->map[i],
1313                                                      maplen), PCI_DMA_TODEVICE);
1314                 }
1315         }
1316
1317 }
1318
1319 /* Map the buffers for this transmit.  This will return
1320  * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1321  */
1322 static int ql_map_send(struct ql_adapter *qdev,
1323                        struct ob_mac_iocb_req *mac_iocb_ptr,
1324                        struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1325 {
1326         int len = skb_headlen(skb);
1327         dma_addr_t map;
1328         int frag_idx, err, map_idx = 0;
1329         struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1330         int frag_cnt = skb_shinfo(skb)->nr_frags;
1331
1332         if (frag_cnt) {
1333                 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1334                              "frag_cnt = %d.\n", frag_cnt);
1335         }
1336         /*
1337          * Map the skb buffer first.
1338          */
1339         map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1340
1341         err = pci_dma_mapping_error(qdev->pdev, map);
1342         if (err) {
1343                 netif_err(qdev, tx_queued, qdev->ndev,
1344                           "PCI mapping failed with error: %d\n", err);
1345
1346                 return NETDEV_TX_BUSY;
1347         }
1348
1349         tbd->len = cpu_to_le32(len);
1350         tbd->addr = cpu_to_le64(map);
1351         pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1352         pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1353         map_idx++;
1354
1355         /*
1356          * This loop fills the remainder of the 8 address descriptors
1357          * in the IOCB.  If there are more than 7 fragments, then the
1358          * eighth address desc will point to an external list (OAL).
1359          * When this happens, the remainder of the frags will be stored
1360          * in this list.
1361          */
1362         for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1363                 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1364                 tbd++;
1365                 if (frag_idx == 6 && frag_cnt > 7) {
1366                         /* Let's tack on an sglist.
1367                          * Our control block will now
1368                          * look like this:
1369                          * iocb->seg[0] = skb->data
1370                          * iocb->seg[1] = frag[0]
1371                          * iocb->seg[2] = frag[1]
1372                          * iocb->seg[3] = frag[2]
1373                          * iocb->seg[4] = frag[3]
1374                          * iocb->seg[5] = frag[4]
1375                          * iocb->seg[6] = frag[5]
1376                          * iocb->seg[7] = ptr to OAL (external sglist)
1377                          * oal->seg[0] = frag[6]
1378                          * oal->seg[1] = frag[7]
1379                          * oal->seg[2] = frag[8]
1380                          * oal->seg[3] = frag[9]
1381                          * oal->seg[4] = frag[10]
1382                          *      etc...
1383                          */
1384                         /* Tack on the OAL in the eighth segment of IOCB. */
1385                         map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1386                                              sizeof(struct oal),
1387                                              PCI_DMA_TODEVICE);
1388                         err = pci_dma_mapping_error(qdev->pdev, map);
1389                         if (err) {
1390                                 netif_err(qdev, tx_queued, qdev->ndev,
1391                                           "PCI mapping of outbound address list failed with error: %d\n",
1392                                           err);
1393                                 goto map_error;
1394                         }
1395
1396                         tbd->addr = cpu_to_le64(map);
1397                         /*
1398                          * The length is the number of fragments
1399                          * that remain to be mapped times the size of
1400                          * a single OAL entry (struct tx_buf_desc).
1401                          */
1402                         tbd->len =
1403                             cpu_to_le32((sizeof(struct tx_buf_desc) *
1404                                          (frag_cnt - frag_idx)) | TX_DESC_C);
1405                         pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1406                                            map);
1407                         pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1408                                           sizeof(struct oal));
1409                         tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1410                         map_idx++;
1411                 }
1412
1413                 map =
1414                     pci_map_page(qdev->pdev, frag->page,
1415                                  frag->page_offset, frag->size,
1416                                  PCI_DMA_TODEVICE);
1417
1418                 err = pci_dma_mapping_error(qdev->pdev, map);
1419                 if (err) {
1420                         netif_err(qdev, tx_queued, qdev->ndev,
1421                                   "PCI mapping frags failed with error: %d.\n",
1422                                   err);
1423                         goto map_error;
1424                 }
1425
1426                 tbd->addr = cpu_to_le64(map);
1427                 tbd->len = cpu_to_le32(frag->size);
1428                 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1429                 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1430                                   frag->size);
1431
1432         }
1433         /* Save the number of segments we've mapped. */
1434         tx_ring_desc->map_cnt = map_idx;
1435         /* Terminate the last segment. */
1436         tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1437         return NETDEV_TX_OK;
1438
1439 map_error:
1440         /*
1441          * map_idx is the number of segments that mapped
1442          * successfully before the failure (the skb->data area
1443          * plus any frags and, possibly, the OAL), so
1444          * ql_unmap_send() can unmap exactly those entries.
1445          */
1446         ql_unmap_send(qdev, tx_ring_desc, map_idx);
1447         return NETDEV_TX_BUSY;
1448 }
1449
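/* Illustrative sketch (not used by the driver): descriptor accounting for
 * the mapping scheme ql_map_send() implements above.  The IOCB holds eight
 * tx_buf_desc entries: entry 0 maps skb->data, the following entries map
 * the frags, and when a frame has more than seven frags entry 7 instead
 * points at the external OAL, which holds frags 6 onward.
 */
static inline int qlge_example_oal_entries(int frag_cnt)
{
        /* Up to seven frags fit entirely in the IOCB itself. */
        if (frag_cnt <= 7)
                return 0;
        /* Frags 6 .. frag_cnt-1 spill into the OAL. */
        return frag_cnt - 6;
}
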
1450 /* Process an inbound completion from an rx ring. */
1451 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1452                                         struct rx_ring *rx_ring,
1453                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1454                                         u32 length,
1455                                         u16 vlan_id)
1456 {
1457         struct sk_buff *skb;
1458         struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1459         struct skb_frag_struct *rx_frag;
1460         int nr_frags;
1461         struct napi_struct *napi = &rx_ring->napi;
1462
1463         napi->dev = qdev->ndev;
1464
1465         skb = napi_get_frags(napi);
1466         if (!skb) {
1467                 netif_err(qdev, drv, qdev->ndev,
1468                           "Couldn't get an skb, exiting.\n");
1469                 rx_ring->rx_dropped++;
1470                 put_page(lbq_desc->p.pg_chunk.page);
1471                 return;
1472         }
1473         prefetch(lbq_desc->p.pg_chunk.va);
1474         rx_frag = skb_shinfo(skb)->frags;
1475         nr_frags = skb_shinfo(skb)->nr_frags;
1476         rx_frag += nr_frags;
1477         rx_frag->page = lbq_desc->p.pg_chunk.page;
1478         rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
1479         rx_frag->size = length;
1480
1481         skb->len += length;
1482         skb->data_len += length;
1483         skb->truesize += length;
1484         skb_shinfo(skb)->nr_frags++;
1485
1486         rx_ring->rx_packets++;
1487         rx_ring->rx_bytes += length;
1488         skb->ip_summed = CHECKSUM_UNNECESSARY;
1489         skb_record_rx_queue(skb, rx_ring->cq_id);
1490         if (qdev->vlgrp && (vlan_id != 0xffff))
1491                 vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
1492         else
1493                 napi_gro_frags(napi);
1494 }
1495
1496 /* Process an inbound completion from an rx ring. */
1497 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1498                                         struct rx_ring *rx_ring,
1499                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1500                                         u32 length,
1501                                         u16 vlan_id)
1502 {
1503         struct net_device *ndev = qdev->ndev;
1504         struct sk_buff *skb = NULL;
1505         void *addr;
1506         struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1507         struct napi_struct *napi = &rx_ring->napi;
1508
1509         skb = netdev_alloc_skb(ndev, length);
1510         if (!skb) {
1511                 netif_err(qdev, drv, qdev->ndev,
1512                           "Couldn't get an skb, need to unwind.\n");
1513                 rx_ring->rx_dropped++;
1514                 put_page(lbq_desc->p.pg_chunk.page);
1515                 return;
1516         }
1517
1518         addr = lbq_desc->p.pg_chunk.va;
1519         prefetch(addr);
1520
1521
1522         /* Frame error, so drop the packet. */
1523         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1524                 netif_err(qdev, drv, qdev->ndev,
1525                           "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1526                 rx_ring->rx_errors++;
1527                 goto err_out;
1528         }
1529
1530         /* The max framesize filter on this chip is set higher than
1531          * MTU since FCoE uses 2k frames.
1532          */
1533         if (length > ndev->mtu + ETH_HLEN) {
1534                 netif_err(qdev, drv, qdev->ndev,
1535                           "Frame too long, dropping.\n");
1536                 rx_ring->rx_dropped++;
1537                 goto err_out;
1538         }
1539         memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1540         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1541                      "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1542                      length);
1543         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1544                                 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1545                                 length-ETH_HLEN);
1546         skb->len += length-ETH_HLEN;
1547         skb->data_len += length-ETH_HLEN;
1548         skb->truesize += length-ETH_HLEN;
1549
1550         rx_ring->rx_packets++;
1551         rx_ring->rx_bytes += skb->len;
1552         skb->protocol = eth_type_trans(skb, ndev);
1553         skb->ip_summed = CHECKSUM_NONE;
1554
1555         if (qdev->rx_csum &&
1556                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1557                 /* TCP frame. */
1558                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1559                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1560                                      "TCP checksum done!\n");
1561                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1562                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1563                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1564                         /* Unfragmented ipv4 UDP frame. */
1565                         struct iphdr *iph = (struct iphdr *) skb->data;
1566                         if (!(iph->frag_off &
1567                                 cpu_to_be16(IP_MF|IP_OFFSET))) {
1568                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1569                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1570                                              qdev->ndev,
1571                                              "UDP checksum done!\n");
1572                         }
1573                 }
1574         }
1575
1576         skb_record_rx_queue(skb, rx_ring->cq_id);
1577         if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1578                 if (qdev->vlgrp && (vlan_id != 0xffff))
1579                         vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
1580                 else
1581                         napi_gro_receive(napi, skb);
1582         } else {
1583                 if (qdev->vlgrp && (vlan_id != 0xffff))
1584                         vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1585                 else
1586                         netif_receive_skb(skb);
1587         }
1588         return;
1589 err_out:
1590         dev_kfree_skb_any(skb);
1591         put_page(lbq_desc->p.pg_chunk.page);
1592 }
1593
1594 /* Process an inbound completion from an rx ring. */
1595 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1596                                         struct rx_ring *rx_ring,
1597                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1598                                         u32 length,
1599                                         u16 vlan_id)
1600 {
1601         struct net_device *ndev = qdev->ndev;
1602         struct sk_buff *skb = NULL;
1603         struct sk_buff *new_skb = NULL;
1604         struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1605
1606         skb = sbq_desc->p.skb;
1607         /* Allocate new_skb and copy */
1608         new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1609         if (new_skb == NULL) {
1610                 netif_err(qdev, probe, qdev->ndev,
1611                           "No skb available, drop the packet.\n");
1612                 rx_ring->rx_dropped++;
1613                 return;
1614         }
1615         skb_reserve(new_skb, NET_IP_ALIGN);
1616         memcpy(skb_put(new_skb, length), skb->data, length);
1617         skb = new_skb;
1618
1619         /* Frame error, so drop the packet. */
1620         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1621                 netif_err(qdev, drv, qdev->ndev,
1622                           "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1623                 dev_kfree_skb_any(skb);
1624                 rx_ring->rx_errors++;
1625                 return;
1626         }
1627
1628         /* loopback self test for ethtool */
1629         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1630                 ql_check_lb_frame(qdev, skb);
1631                 dev_kfree_skb_any(skb);
1632                 return;
1633         }
1634
1635         /* The max framesize filter on this chip is set higher than
1636          * MTU since FCoE uses 2k frames.
1637          */
1638         if (skb->len > ndev->mtu + ETH_HLEN) {
1639                 dev_kfree_skb_any(skb);
1640                 rx_ring->rx_dropped++;
1641                 return;
1642         }
1643
1644         prefetch(skb->data);
1645         skb->dev = ndev;
1646         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1647                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1648                              "%s Multicast.\n",
1649                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1650                              IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1651                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1652                              IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1653                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1654                              IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1655         }
1656         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1657                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1658                              "Promiscuous Packet.\n");
1659
1660         rx_ring->rx_packets++;
1661         rx_ring->rx_bytes += skb->len;
1662         skb->protocol = eth_type_trans(skb, ndev);
1663         skb->ip_summed = CHECKSUM_NONE;
1664
1665         /* If rx checksum is on, and there are no
1666          * csum or frame errors.
1667          */
1668         if (qdev->rx_csum &&
1669                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1670                 /* TCP frame. */
1671                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1672                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1673                                      "TCP checksum done!\n");
1674                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1675                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1676                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1677                         /* Unfragmented ipv4 UDP frame. */
1678                         struct iphdr *iph = (struct iphdr *) skb->data;
1679                         if (!(iph->frag_off &
1680                                 cpu_to_be16(IP_MF|IP_OFFSET))) {
1681                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1682                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1683                                              qdev->ndev,
1684                                              "UDP checksum done!\n");
1685                         }
1686                 }
1687         }
1688
1689         skb_record_rx_queue(skb, rx_ring->cq_id);
1690         if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1691                 if (qdev->vlgrp && (vlan_id != 0xffff))
1692                         vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
1693                                                 vlan_id, skb);
1694                 else
1695                         napi_gro_receive(&rx_ring->napi, skb);
1696         } else {
1697                 if (qdev->vlgrp && (vlan_id != 0xffff))
1698                         vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1699                 else
1700                         netif_receive_skb(skb);
1701         }
1702 }
1703
1704 static void ql_realign_skb(struct sk_buff *skb, int len)
1705 {
1706         void *temp_addr = skb->data;
1707
1708         /* Undo the skb_reserve(skb,32) we did before
1709          * giving to hardware, and realign data on
1710          * a 2-byte boundary.
1711          */
1712         skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1713         skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1714         skb_copy_to_linear_data(skb, temp_addr,
1715                 (unsigned int)len);
1716 }
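
/* Worked example for ql_realign_skb() (a sketch; QLGE_SB_PAD is 32 per the
 * comment above, and NET_IP_ALIGN is normally 2 but architecture dependent):
 * the small buffer was handed to the chip with skb->data 32 bytes past the
 * head of the buffer.  Pulling data and tail back by
 * QLGE_SB_PAD - NET_IP_ALIGN = 30 bytes and copying the frame down leaves
 * the 14-byte Ethernet header at offset 2, so the IP header that follows it
 * starts on a 4-byte boundary.
 */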
1717
1718 /*
1719  * This function builds an skb for the given inbound
1720  * completion.  It will be rewritten for readability in the near
1721  * future, but for now it works well.
1722  */
1723 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1724                                        struct rx_ring *rx_ring,
1725                                        struct ib_mac_iocb_rsp *ib_mac_rsp)
1726 {
1727         struct bq_desc *lbq_desc;
1728         struct bq_desc *sbq_desc;
1729         struct sk_buff *skb = NULL;
1730         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1731         u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1732
1733         /*
1734          * Handle the header buffer if present.
1735          */
1736         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1737             ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1738                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1739                              "Header of %d bytes in small buffer.\n", hdr_len);
1740                 /*
1741                  * Headers fit nicely into a small buffer.
1742                  */
1743                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1744                 pci_unmap_single(qdev->pdev,
1745                                 pci_unmap_addr(sbq_desc, mapaddr),
1746                                 pci_unmap_len(sbq_desc, maplen),
1747                                 PCI_DMA_FROMDEVICE);
1748                 skb = sbq_desc->p.skb;
1749                 ql_realign_skb(skb, hdr_len);
1750                 skb_put(skb, hdr_len);
1751                 sbq_desc->p.skb = NULL;
1752         }
1753
1754         /*
1755          * Handle the data buffer(s).
1756          */
1757         if (unlikely(!length)) {        /* Is there data too? */
1758                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1759                              "No Data buffer in this packet.\n");
1760                 return skb;
1761         }
1762
1763         if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1764                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1765                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1766                                      "Headers in small, data of %d bytes in small, combine them.\n",
1767                                      length);
1768                         /*
1769                          * Data is less than small buffer size so it's
1770                          * stuffed in a small buffer.
1771                          * For this case we append the data
1772                          * from the "data" small buffer to the "header" small
1773                          * buffer.
1774                          */
1775                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1776                         pci_dma_sync_single_for_cpu(qdev->pdev,
1777                                                     pci_unmap_addr
1778                                                     (sbq_desc, mapaddr),
1779                                                     pci_unmap_len
1780                                                     (sbq_desc, maplen),
1781                                                     PCI_DMA_FROMDEVICE);
1782                         memcpy(skb_put(skb, length),
1783                                sbq_desc->p.skb->data, length);
1784                         pci_dma_sync_single_for_device(qdev->pdev,
1785                                                        pci_unmap_addr
1786                                                        (sbq_desc,
1787                                                         mapaddr),
1788                                                        pci_unmap_len
1789                                                        (sbq_desc,
1790                                                         maplen),
1791                                                        PCI_DMA_FROMDEVICE);
1792                 } else {
1793                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1794                                      "%d bytes in a single small buffer.\n",
1795                                      length);
1796                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1797                         skb = sbq_desc->p.skb;
1798                         ql_realign_skb(skb, length);
1799                         skb_put(skb, length);
1800                         pci_unmap_single(qdev->pdev,
1801                                          pci_unmap_addr(sbq_desc,
1802                                                         mapaddr),
1803                                          pci_unmap_len(sbq_desc,
1804                                                        maplen),
1805                                          PCI_DMA_FROMDEVICE);
1806                         sbq_desc->p.skb = NULL;
1807                 }
1808         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1809                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1810                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1811                                      "Header in small, %d bytes in large. Chain large to small!\n",
1812                                      length);
1813                         /*
1814                          * The data is in a single large buffer.  We
1815                          * chain it to the header buffer's skb and let
1816                          * it rip.
1817                          */
1818                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1819                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1820                                      "Chaining page at offset = %d, for %d bytes  to skb.\n",
1821                                      lbq_desc->p.pg_chunk.offset, length);
1822                         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1823                                                 lbq_desc->p.pg_chunk.offset,
1824                                                 length);
1825                         skb->len += length;
1826                         skb->data_len += length;
1827                         skb->truesize += length;
1828                 } else {
1829                         /*
1830                          * The headers and data are in a single large buffer.  We
1831                          * chain the page to a new skb and pull the headers into its
1832                          * linear area.  This can happen with jumbo mtu on a non-TCP/UDP frame.
1833                          */
1834                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1835                         skb = netdev_alloc_skb(qdev->ndev, length);
1836                         if (skb == NULL) {
1837                                 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1838                                              "No skb available, drop the packet.\n");
1839                                 return NULL;
1840                         }
1841                         pci_unmap_page(qdev->pdev,
1842                                        pci_unmap_addr(lbq_desc,
1843                                                       mapaddr),
1844                                        pci_unmap_len(lbq_desc, maplen),
1845                                        PCI_DMA_FROMDEVICE);
1846                         skb_reserve(skb, NET_IP_ALIGN);
1847                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1848                                      "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1849                                      length);
1850                         skb_fill_page_desc(skb, 0,
1851                                                 lbq_desc->p.pg_chunk.page,
1852                                                 lbq_desc->p.pg_chunk.offset,
1853                                                 length);
1854                         skb->len += length;
1855                         skb->data_len += length;
1856                         skb->truesize += length;
1857                         length -= length;
1858                         __pskb_pull_tail(skb,
1859                                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1860                                 VLAN_ETH_HLEN : ETH_HLEN);
1861                 }
1862         } else {
1863                 /*
1864                  * The data is in a chain of large buffers
1865                  * pointed to by a small buffer.  We loop
1866                  * through and chain them to our small header
1867                  * buffer's skb.
1868                  * frags:  There are 18 frags max and our small
1869                  *         buffer will hold 32 of them.  The thing is,
1870                  *         we'll use at most 3 for our 9000 byte jumbo
1871                  *         frames.  If the MTU goes up we could
1872                  *         eventually be in trouble.
1873                  */
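                /*
                 * Worked example (illustrative only; the actual
                 * lbq_buf_size depends on the configured MTU): with
                 * 4096-byte large-buffer chunks, a 9000-byte jumbo frame
                 * needs DIV_ROUND_UP(9000, 4096) = 3 chunks, matching the
                 * "at most 3" note above and staying well under the
                 * 18-frag limit.
                 */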
1874                 int size, i = 0;
1875                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1876                 pci_unmap_single(qdev->pdev,
1877                                  pci_unmap_addr(sbq_desc, mapaddr),
1878                                  pci_unmap_len(sbq_desc, maplen),
1879                                  PCI_DMA_FROMDEVICE);
1880                 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1881                         /*
1882                          * This is a non-TCP/UDP IP frame, so
1883                          * the headers aren't split into a small
1884                          * buffer.  We have to use the small buffer
1885                          * that contains our sg list as the skb to
1886                          * send upstream, and chain the large-buffer
1887                          * page chunks onto it in the loop that
1888                          * follows.
1889                          */
1890                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1891                                      "%d bytes of headers & data in chain of large.\n",
1892                                      length);
1893                         skb = sbq_desc->p.skb;
1894                         sbq_desc->p.skb = NULL;
1895                         skb_reserve(skb, NET_IP_ALIGN);
1896                 }
1897                 while (length > 0) {
1898                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1899                         size = (length < rx_ring->lbq_buf_size) ? length :
1900                                 rx_ring->lbq_buf_size;
1901
1902                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1903                                      "Adding page %d to skb for %d bytes.\n",
1904                                      i, size);
1905                         skb_fill_page_desc(skb, i,
1906                                                 lbq_desc->p.pg_chunk.page,
1907                                                 lbq_desc->p.pg_chunk.offset,
1908                                                 size);
1909                         skb->len += size;
1910                         skb->data_len += size;
1911                         skb->truesize += size;
1912                         length -= size;
1913                         i++;
1914                 }
1915                 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1916                                 VLAN_ETH_HLEN : ETH_HLEN);
1917         }
1918         return skb;
1919 }
1920
1921 /* Process an inbound completion from an rx ring. */
1922 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1923                                    struct rx_ring *rx_ring,
1924                                    struct ib_mac_iocb_rsp *ib_mac_rsp,
1925                                    u16 vlan_id)
1926 {
1927         struct net_device *ndev = qdev->ndev;
1928         struct sk_buff *skb = NULL;
1929
1930         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1931
1932         skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1933         if (unlikely(!skb)) {
1934                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1935                              "No skb available, drop packet.\n");
1936                 rx_ring->rx_dropped++;
1937                 return;
1938         }
1939
1940         /* Frame error, so drop the packet. */
1941         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1942                 netif_err(qdev, drv, qdev->ndev,
1943                           "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1944                 dev_kfree_skb_any(skb);
1945                 rx_ring->rx_errors++;
1946                 return;
1947         }
1948
1949         /* The max framesize filter on this chip is set higher than
1950          * MTU since FCoE uses 2k frames.
1951          */
1952         if (skb->len > ndev->mtu + ETH_HLEN) {
1953                 dev_kfree_skb_any(skb);
1954                 rx_ring->rx_dropped++;
1955                 return;
1956         }
1957
1958         /* loopback self test for ethtool */
1959         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1960                 ql_check_lb_frame(qdev, skb);
1961                 dev_kfree_skb_any(skb);
1962                 return;
1963         }
1964
1965         prefetch(skb->data);
1966         skb->dev = ndev;
1967         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1968                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1969                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1970                              IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1971                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1972                              IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1973                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1974                              IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1975                 rx_ring->rx_multicast++;
1976         }
1977         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1978                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1979                              "Promiscuous Packet.\n");
1980         }
1981
1982         skb->protocol = eth_type_trans(skb, ndev);
1983         skb->ip_summed = CHECKSUM_NONE;
1984
1985         /* If rx checksum is on, and there are no
1986          * csum or frame errors.
1987          */
1988         if (qdev->rx_csum &&
1989                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1990                 /* TCP frame. */
1991                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1992                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1993                                      "TCP checksum done!\n");
1994                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1995                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1996                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1997                         /* Unfragmented ipv4 UDP frame. */
1998                         struct iphdr *iph = (struct iphdr *) skb->data;
1999                         if (!(iph->frag_off &
2000                                 cpu_to_be16(IP_MF|IP_OFFSET))) {
2001                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2002                                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2003                                              "UDP checksum done!\n");
2004                         }
2005                 }
2006         }
2007
2008         rx_ring->rx_packets++;
2009         rx_ring->rx_bytes += skb->len;
2010         skb_record_rx_queue(skb, rx_ring->cq_id);
2011         if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2012                 if (qdev->vlgrp &&
2013                         (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2014                         (vlan_id != 0))
2015                         vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
2016                                 vlan_id, skb);
2017                 else
2018                         napi_gro_receive(&rx_ring->napi, skb);
2019         } else {
2020                 if (qdev->vlgrp &&
2021                         (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2022                         (vlan_id != 0))
2023                         vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
2024                 else
2025                         netif_receive_skb(skb);
2026         }
2027 }
2028
2029 /* Process an inbound completion from an rx ring. */
2030 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2031                                         struct rx_ring *rx_ring,
2032                                         struct ib_mac_iocb_rsp *ib_mac_rsp)
2033 {
2034         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2035         u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2036                         ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2037                         IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2038
2039         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2040
2041         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2042                 /* The data and headers are split into
2043                  * separate buffers.
2044                  */
2045                 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2046                                                 vlan_id);
2047         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2048                 /* The data fit in a single small buffer.
2049                  * Allocate a new skb, copy the data and
2050                  * return the buffer to the free pool.
2051                  */
2052                 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2053                                                 length, vlan_id);
2054         } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2055                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2056                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2057                 /* TCP packet in a page chunk that's been checksummed.
2058                  * Tack it on to our GRO skb and let it go.
2059                  */
2060                 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2061                                                 length, vlan_id);
2062         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2063                 /* Non-TCP packet in a page chunk. Allocate an
2064                  * skb, tack it on frags, and send it up.
2065                  */
2066                 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2067                                                 length, vlan_id);
2068         } else {
2069                 struct bq_desc *lbq_desc;
2070
2071                 /* Free small buffer that holds the IAL */
2072                 lbq_desc = ql_get_curr_sbuf(rx_ring);
2073                 netif_err(qdev, rx_err, qdev->ndev,
2074                           "Dropping frame, len %d > mtu %d\n",
2075                           length, qdev->ndev->mtu);
2076
2077                 /* Unwind the large buffers for this frame. */
2078                 while (length > 0) {
2079                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
2080                         length -= (length < rx_ring->lbq_buf_size) ?
2081                                 length : rx_ring->lbq_buf_size;
2082                         put_page(lbq_desc->p.pg_chunk.page);
2083                 }
2084         }
2085
2086         return (unsigned long)length;
2087 }
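
/* Summary of the receive dispatch above: header/data-split frames go to
 * ql_process_mac_split_rx_intr(), frames that fit in a single small buffer
 * go to ql_process_mac_rx_skb(), checksummed TCP page chunks take the GRO
 * path in ql_process_mac_rx_gro_page(), other page chunks go to
 * ql_process_mac_rx_page(), and anything left over is treated as oversized
 * and its large-buffer chunks are returned with put_page().
 */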
2088
2089 /* Process an outbound completion from an rx ring. */
2090 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2091                                    struct ob_mac_iocb_rsp *mac_rsp)
2092 {
2093         struct tx_ring *tx_ring;
2094         struct tx_ring_desc *tx_ring_desc;
2095
2096         QL_DUMP_OB_MAC_RSP(mac_rsp);
2097         tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2098         tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2099         ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2100         tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2101         tx_ring->tx_packets++;
2102         dev_kfree_skb(tx_ring_desc->skb);
2103         tx_ring_desc->skb = NULL;
2104
2105         if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2106                                         OB_MAC_IOCB_RSP_S |
2107                                         OB_MAC_IOCB_RSP_L |
2108                                         OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2109                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2110                         netif_warn(qdev, tx_done, qdev->ndev,
2111                                    "Total descriptor length did not match transfer length.\n");
2112                 }
2113                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2114                         netif_warn(qdev, tx_done, qdev->ndev,
2115                                    "Frame too short to be valid, not sent.\n");
2116                 }
2117                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2118                         netif_warn(qdev, tx_done, qdev->ndev,
2119                                    "Frame too long, but sent anyway.\n");
2120                 }
2121                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2122                         netif_warn(qdev, tx_done, qdev->ndev,
2123                                    "PCI backplane error. Frame not sent.\n");
2124                 }
2125         }
2126         atomic_inc(&tx_ring->tx_count);
2127 }
2128
2129 /* Fire up a handler to reset the MPI processor. */
2130 void ql_queue_fw_error(struct ql_adapter *qdev)
2131 {
2132         ql_link_off(qdev);
2133         queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2134 }
2135
2136 void ql_queue_asic_error(struct ql_adapter *qdev)
2137 {
2138         ql_link_off(qdev);
2139         ql_disable_interrupts(qdev);
2140         /* Clear adapter up bit to signal the recovery
2141          * process that it shouldn't kill the reset worker
2142          * thread
2143          */
2144         clear_bit(QL_ADAPTER_UP, &qdev->flags);
2145         queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2146 }
2147
2148 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2149                                     struct ib_ae_iocb_rsp *ib_ae_rsp)
2150 {
2151         switch (ib_ae_rsp->event) {
2152         case MGMT_ERR_EVENT:
2153                 netif_err(qdev, rx_err, qdev->ndev,
2154                           "Management Processor Fatal Error.\n");
2155                 ql_queue_fw_error(qdev);
2156                 return;
2157
2158         case CAM_LOOKUP_ERR_EVENT:
2159                 netif_err(qdev, link, qdev->ndev,
2160                           "Multiple CAM hits lookup occurred.\n");
2161                 netif_err(qdev, drv, qdev->ndev,
2162                           "This event shouldn't occur.\n");
2163                 ql_queue_asic_error(qdev);
2164                 return;
2165
2166         case SOFT_ECC_ERROR_EVENT:
2167                 netif_err(qdev, rx_err, qdev->ndev,
2168                           "Soft ECC error detected.\n");
2169                 ql_queue_asic_error(qdev);
2170                 break;
2171
2172         case PCI_ERR_ANON_BUF_RD:
2173                 netif_err(qdev, rx_err, qdev->ndev,
2174                           "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
2175                           ib_ae_rsp->q_id);
2176                 ql_queue_asic_error(qdev);
2177                 break;
2178
2179         default:
2180                 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2181                           ib_ae_rsp->event);
2182                 ql_queue_asic_error(qdev);
2183                 break;
2184         }
2185 }
2186
2187 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2188 {
2189         struct ql_adapter *qdev = rx_ring->qdev;
2190         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2191         struct ob_mac_iocb_rsp *net_rsp = NULL;
2192         int count = 0;
2193
2194         struct tx_ring *tx_ring;
2195         /* While there are entries in the completion queue. */
2196         while (prod != rx_ring->cnsmr_idx) {
2197
2198                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2199                              "cq_id = %d, prod = %d, cnsmr = %d.\n",
2200                              rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2201
2202                 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2203                 rmb();
2204                 switch (net_rsp->opcode) {
2205
2206                 case OPCODE_OB_MAC_TSO_IOCB:
2207                 case OPCODE_OB_MAC_IOCB:
2208                         ql_process_mac_tx_intr(qdev, net_rsp);
2209                         break;
2210                 default:
2211                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2212                                      "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2213                                      net_rsp->opcode);
2214                 }
2215                 count++;
2216                 ql_update_cq(rx_ring);
2217                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2218         }
2219         ql_write_cq_idx(rx_ring);
2220         tx_ring = net_rsp ? &qdev->tx_ring[net_rsp->txq_idx] : NULL;
2221         if (net_rsp != NULL &&
2222             __netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2223                 if (atomic_read(&tx_ring->queue_stopped) &&
2224                     (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2225                         /*
2226                          * The queue got stopped because the tx_ring was full.
2227                          * Wake it up, because it's now at least 25% empty.
2228                          */
2229                         netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2230         }
2231
2232         return count;
2233 }
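
/* A sketch of the wake threshold above with hypothetical numbers: if the
 * tx work queue holds wq_len = 128 descriptors, a stopped subqueue is only
 * rewoken once more than wq_len / 4 = 32 descriptors are free again, which
 * keeps the queue from bouncing on and off for every single completion.
 */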
2234
2235 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2236 {
2237         struct ql_adapter *qdev = rx_ring->qdev;
2238         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2239         struct ql_net_rsp_iocb *net_rsp;
2240         int count = 0;
2241
2242         /* While there are entries in the completion queue. */
2243         while (prod != rx_ring->cnsmr_idx) {
2244
2245                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2246                              "cq_id = %d, prod = %d, cnsmr = %d.\n",
2247                              rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2248
2249                 net_rsp = rx_ring->curr_entry;
2250                 rmb();
2251                 switch (net_rsp->opcode) {
2252                 case OPCODE_IB_MAC_IOCB:
2253                         ql_process_mac_rx_intr(qdev, rx_ring,
2254                                                (struct ib_mac_iocb_rsp *)
2255                                                net_rsp);
2256                         break;
2257
2258                 case OPCODE_IB_AE_IOCB:
2259                         ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2260                                                 net_rsp);
2261                         break;
2262                 default:
2263                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2264                                      "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2265                                      net_rsp->opcode);
2266                         break;
2267                 }
2268                 count++;
2269                 ql_update_cq(rx_ring);
2270                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2271                 if (count == budget)
2272                         break;
2273         }
2274         ql_update_buffer_queues(qdev, rx_ring);
2275         ql_write_cq_idx(rx_ring);
2276         return count;
2277 }
2278
2279 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2280 {
2281         struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2282         struct ql_adapter *qdev = rx_ring->qdev;
2283         struct rx_ring *trx_ring;
2284         int i, work_done = 0;
2285         struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2286
2287         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2288                      "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2289
2290         /* Service the TX rings first.  They start
2291          * right after the RSS rings. */
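        /* Ring index layout assumed here (consistent with the loop bounds
         * below): indices 0 .. rss_ring_count-1 are the inbound RSS rings,
         * and indices rss_ring_count .. rx_ring_count-1 are the outbound
         * (TX completion) rings serviced by this pass.
         */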
2292         for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2293                 trx_ring = &qdev->rx_ring[i];
2294                 /* If this TX completion ring belongs to this vector and
2295                  * it's not empty then service it.
2296                  */
2297                 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2298                         (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2299                                         trx_ring->cnsmr_idx)) {
2300                         netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2301                                      "%s: Servicing TX completion ring %d.\n",
2302                                      __func__, trx_ring->cq_id);
2303                         ql_clean_outbound_rx_ring(trx_ring);
2304                 }
2305         }
2306
2307         /*
2308          * Now service the RSS ring if it's active.
2309          */
2310         if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2311                                         rx_ring->cnsmr_idx) {
2312                 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2313                              "%s: Servicing RX completion ring %d.\n",
2314                              __func__, rx_ring->cq_id);
2315                 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2316         }
2317
2318         if (work_done < budget) {
2319                 napi_complete(napi);
2320                 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2321         }
2322         return work_done;
2323 }
2324
2325 static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
2326 {
2327         struct ql_adapter *qdev = netdev_priv(ndev);
2328
2329         qdev->vlgrp = grp;
2330         if (grp) {
2331                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
2332                              "Turning on VLAN in NIC_RCV_CFG.\n");
2333                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2334                            NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2335         } else {
2336                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
2337                              "Turning off VLAN in NIC_RCV_CFG.\n");
2338                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2339         }
2340 }
2341
2342 static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2343 {
2344         struct ql_adapter *qdev = netdev_priv(ndev);
2345         u32 enable_bit = MAC_ADDR_E;
2346         int status;
2347
2348         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2349         if (status)
2350                 return;
2351         if (ql_set_mac_addr_reg
2352             (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2353                 netif_err(qdev, ifup, qdev->ndev,
2354                           "Failed to init vlan address.\n");
2355         }
2356         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2357 }
2358
2359 static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2360 {
2361         struct ql_adapter *qdev = netdev_priv(ndev);
2362         u32 enable_bit = 0;
2363         int status;
2364
2365         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2366         if (status)
2367                 return;
2368
2369         if (ql_set_mac_addr_reg
2370             (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2371                 netif_err(qdev, ifup, qdev->ndev,
2372                           "Failed to clear vlan address.\n");
2373         }
2374         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2375
2376 }
2377
2378 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2379 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2380 {
2381         struct rx_ring *rx_ring = dev_id;
2382         napi_schedule(&rx_ring->napi);
2383         return IRQ_HANDLED;
2384 }
2385
2386 /* This handles a fatal error, MPI activity, and the default
2387  * rx_ring in an MSI-X multiple vector environment.
2388  * In an MSI/Legacy environment it also processes the rest of
2389  * the rx_rings.
2390  */
2391 static irqreturn_t qlge_isr(int irq, void *dev_id)
2392 {
2393         struct rx_ring *rx_ring = dev_id;
2394         struct ql_adapter *qdev = rx_ring->qdev;
2395         struct intr_context *intr_context = &qdev->intr_context[0];
2396         u32 var;
2397         int work_done = 0;
2398
2399         spin_lock(&qdev->hw_lock);
2400         if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2401                 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2402                              "Shared Interrupt, Not ours!\n");
2403                 spin_unlock(&qdev->hw_lock);
2404                 return IRQ_NONE;
2405         }
2406         spin_unlock(&qdev->hw_lock);
2407
2408         var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2409
2410         /*
2411          * Check for fatal error.
2412          */
2413         if (var & STS_FE) {
2414                 ql_queue_asic_error(qdev);
2415                 netif_err(qdev, intr, qdev->ndev,
2416                           "Got fatal error, STS = %x.\n", var);
2417                 var = ql_read32(qdev, ERR_STS);
2418                 netif_err(qdev, intr, qdev->ndev,
2419                           "Resetting chip. Error Status Register = 0x%x\n", var);
2420                 return IRQ_HANDLED;
2421         }
2422
2423         /*
2424          * Check MPI processor activity.
2425          */
2426         if ((var & STS_PI) &&
2427                 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2428                 /*
2429                  * We've got an async event or mailbox completion.
2430                  * Handle it and clear the source of the interrupt.
2431                  */
2432                 netif_err(qdev, intr, qdev->ndev,
2433                           "Got MPI processor interrupt.\n");
2434                 ql_disable_completion_interrupt(qdev, intr_context->intr);
2435                 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2436                 queue_delayed_work_on(smp_processor_id(),
2437                                 qdev->workqueue, &qdev->mpi_work, 0);
2438                 work_done++;
2439         }
2440
2441         /*
2442          * Get the bit-mask that shows the active queues for this
2443          * pass.  Compare it to the queues that this irq services
2444          * and call napi if there's a match.
2445          */
2446         var = ql_read32(qdev, ISR1);
2447         if (var & intr_context->irq_mask) {
2448                 netif_info(qdev, intr, qdev->ndev,
2449                            "Waking handler for rx_ring[0].\n");
2450                 ql_disable_completion_interrupt(qdev, intr_context->intr);
2451                 napi_schedule(&rx_ring->napi);
2452                 work_done++;
2453         }
2454         ql_enable_completion_interrupt(qdev, intr_context->intr);
2455         return work_done ? IRQ_HANDLED : IRQ_NONE;
2456 }
2457
2458 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2459 {
2460
2461         if (skb_is_gso(skb)) {
2462                 int err;
2463                 if (skb_header_cloned(skb)) {
2464                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2465                         if (err)
2466                                 return err;
2467                 }
2468
2469                 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2470                 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2471                 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2472                 mac_iocb_ptr->total_hdrs_len =
2473                     cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2474                 mac_iocb_ptr->net_trans_offset =
2475                     cpu_to_le16(skb_network_offset(skb) |
2476                                 skb_transport_offset(skb)
2477                                 << OB_MAC_TRANSPORT_HDR_SHIFT);
2478                 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2479                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2480                 if (likely(skb->protocol == htons(ETH_P_IP))) {
2481                         struct iphdr *iph = ip_hdr(skb);
2482                         iph->check = 0;
2483                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2484                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2485                                                                  iph->daddr, 0,
2486                                                                  IPPROTO_TCP,
2487                                                                  0);
2488                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2489                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2490                         tcp_hdr(skb)->check =
2491                             ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2492                                              &ipv6_hdr(skb)->daddr,
2493                                              0, IPPROTO_TCP, 0);
2494                 }
2495                 return 1;
2496         }
2497         return 0;
2498 }
2499
2500 static void ql_hw_csum_setup(struct sk_buff *skb,
2501                              struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2502 {
2503         int len;
2504         struct iphdr *iph = ip_hdr(skb);
2505         __sum16 *check;
2506         mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2507         mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2508         mac_iocb_ptr->net_trans_offset =
2509                 cpu_to_le16(skb_network_offset(skb) |
2510                 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2511
2512         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2513         len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2514         if (likely(iph->protocol == IPPROTO_TCP)) {
2515                 check = &(tcp_hdr(skb)->check);
2516                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2517                 mac_iocb_ptr->total_hdrs_len =
2518                     cpu_to_le16(skb_transport_offset(skb) +
2519                                 (tcp_hdr(skb)->doff << 2));
2520         } else {
2521                 check = &(udp_hdr(skb)->check);
2522                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2523                 mac_iocb_ptr->total_hdrs_len =
2524                     cpu_to_le16(skb_transport_offset(skb) +
2525                                 sizeof(struct udphdr));
2526         }
2527         *check = ~csum_tcpudp_magic(iph->saddr,
2528                                     iph->daddr, len, iph->protocol, 0);
2529 }
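
/* Example of the checksum seeding above (illustrative): for a 1500-byte
 * IPv4 packet with a 20-byte header, len == 1500 - 20 == 1480 and the
 * TCP/UDP check field is primed with
 *
 *      ~csum_tcpudp_magic(iph->saddr, iph->daddr, 1480, iph->protocol, 0)
 *
 * i.e. the complemented pseudo-header sum; the OB_MAC_TSO_IOCB_TC/UC
 * flag then tells the chip to finish the checksum over the payload.
 */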
2530
2531 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2532 {
2533         struct tx_ring_desc *tx_ring_desc;
2534         struct ob_mac_iocb_req *mac_iocb_ptr;
2535         struct ql_adapter *qdev = netdev_priv(ndev);
2536         int tso;
2537         struct tx_ring *tx_ring;
2538         u32 tx_ring_idx = (u32) skb->queue_mapping;
2539
2540         tx_ring = &qdev->tx_ring[tx_ring_idx];
2541
2542         if (skb_padto(skb, ETH_ZLEN))
2543                 return NETDEV_TX_OK;
2544
2545         if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2546                 netif_info(qdev, tx_queued, qdev->ndev,
2547                            "%s: shutting down tx queue %d due to lack of resources.\n",
2548                            __func__, tx_ring_idx);
2549                 netif_stop_subqueue(ndev, tx_ring->wq_id);
2550                 atomic_inc(&tx_ring->queue_stopped);
2551                 tx_ring->tx_errors++;
2552                 return NETDEV_TX_BUSY;
2553         }
2554         tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2555         mac_iocb_ptr = tx_ring_desc->queue_entry;
2556         memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2557
2558         mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2559         mac_iocb_ptr->tid = tx_ring_desc->index;
2560         /* We use the upper 32-bits to store the tx queue for this IO.
2561          * When we get the completion we can use it to establish the context.
2562          */
2563         mac_iocb_ptr->txq_idx = tx_ring_idx;
2564         tx_ring_desc->skb = skb;
2565
2566         mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2567
2568         if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
2569                 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2570                              "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
2571                 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2572                 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2573         }
2574         tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2575         if (tso < 0) {
2576                 dev_kfree_skb_any(skb);
2577                 return NETDEV_TX_OK;
2578         } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2579                 ql_hw_csum_setup(skb,
2580                                  (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2581         }
2582         if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2583                         NETDEV_TX_OK) {
2584                 netif_err(qdev, tx_queued, qdev->ndev,
2585                           "Could not map the segments.\n");
2586                 tx_ring->tx_errors++;
2587                 return NETDEV_TX_BUSY;
2588         }
2589         QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2590         tx_ring->prod_idx++;
2591         if (tx_ring->prod_idx == tx_ring->wq_len)
2592                 tx_ring->prod_idx = 0;
2593         wmb();
2594
2595         ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2596         netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2597                      "tx queued, slot %d, len %d\n",
2598                      tx_ring->prod_idx, skb->len);
2599
2600         atomic_dec(&tx_ring->tx_count);
2601         return NETDEV_TX_OK;
2602 }
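
/* The transmit fast path above follows the usual producer-index /
 * doorbell pattern.  A minimal sketch (not the driver's literal code;
 * fill_iocb() is a placeholder for the IOCB setup done inline above):
 *
 *      desc = &tx_ring->q[tx_ring->prod_idx];
 *      fill_iocb(desc->queue_entry, skb);
 *      if (++tx_ring->prod_idx == tx_ring->wq_len)
 *              tx_ring->prod_idx = 0;
 *      wmb();          // make the descriptor visible before the doorbell
 *      ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
 */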
2603
2604
2605 static void ql_free_shadow_space(struct ql_adapter *qdev)
2606 {
2607         if (qdev->rx_ring_shadow_reg_area) {
2608                 pci_free_consistent(qdev->pdev,
2609                                     PAGE_SIZE,
2610                                     qdev->rx_ring_shadow_reg_area,
2611                                     qdev->rx_ring_shadow_reg_dma);
2612                 qdev->rx_ring_shadow_reg_area = NULL;
2613         }
2614         if (qdev->tx_ring_shadow_reg_area) {
2615                 pci_free_consistent(qdev->pdev,
2616                                     PAGE_SIZE,
2617                                     qdev->tx_ring_shadow_reg_area,
2618                                     qdev->tx_ring_shadow_reg_dma);
2619                 qdev->tx_ring_shadow_reg_area = NULL;
2620         }
2621 }
2622
2623 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2624 {
2625         qdev->rx_ring_shadow_reg_area =
2626             pci_alloc_consistent(qdev->pdev,
2627                                  PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2628         if (qdev->rx_ring_shadow_reg_area == NULL) {
2629                 netif_err(qdev, ifup, qdev->ndev,
2630                           "Allocation of RX shadow space failed.\n");
2631                 return -ENOMEM;
2632         }
2633         memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2634         qdev->tx_ring_shadow_reg_area =
2635             pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2636                                  &qdev->tx_ring_shadow_reg_dma);
2637         if (qdev->tx_ring_shadow_reg_area == NULL) {
2638                 netif_err(qdev, ifup, qdev->ndev,
2639                           "Allocation of TX shadow space failed.\n");
2640                 goto err_wqp_sh_area;
2641         }
2642         memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2643         return 0;
2644
2645 err_wqp_sh_area:
2646         pci_free_consistent(qdev->pdev,
2647                             PAGE_SIZE,
2648                             qdev->rx_ring_shadow_reg_area,
2649                             qdev->rx_ring_shadow_reg_dma);
2650         return -ENOMEM;
2651 }
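
/* Rough layout of the two shadow pages allocated above, as they are
 * carved up later by ql_start_rx_ring()/ql_start_tx_ring(): each
 * completion queue owns RX_RING_SHADOW_SPACE bytes of the rx page
 * (an 8-byte producer-index shadow followed by the lbq and sbq
 * base-address indirection lists), and each tx ring owns a single
 * 8-byte consumer-index shadow in the tx page.
 */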
2652
2653 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2654 {
2655         struct tx_ring_desc *tx_ring_desc;
2656         int i;
2657         struct ob_mac_iocb_req *mac_iocb_ptr;
2658
2659         mac_iocb_ptr = tx_ring->wq_base;
2660         tx_ring_desc = tx_ring->q;
2661         for (i = 0; i < tx_ring->wq_len; i++) {
2662                 tx_ring_desc->index = i;
2663                 tx_ring_desc->skb = NULL;
2664                 tx_ring_desc->queue_entry = mac_iocb_ptr;
2665                 mac_iocb_ptr++;
2666                 tx_ring_desc++;
2667         }
2668         atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2669         atomic_set(&tx_ring->queue_stopped, 0);
2670 }
2671
2672 static void ql_free_tx_resources(struct ql_adapter *qdev,
2673                                  struct tx_ring *tx_ring)
2674 {
2675         if (tx_ring->wq_base) {
2676                 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2677                                     tx_ring->wq_base, tx_ring->wq_base_dma);
2678                 tx_ring->wq_base = NULL;
2679         }
2680         kfree(tx_ring->q);
2681         tx_ring->q = NULL;
2682 }
2683
2684 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2685                                  struct tx_ring *tx_ring)
2686 {
2687         tx_ring->wq_base =
2688             pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2689                                  &tx_ring->wq_base_dma);
2690
2691         if ((tx_ring->wq_base == NULL) ||
2692             tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
2693                 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2694                 return -ENOMEM;
2695         }
2696         tx_ring->q =
2697             kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2698         if (tx_ring->q == NULL)
2699                 goto err;
2700
2701         return 0;
2702 err:
2703         pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2704                             tx_ring->wq_base, tx_ring->wq_base_dma);
2705         return -ENOMEM;
2706 }
2707
2708 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2709 {
2710         struct bq_desc *lbq_desc;
2711
2712         uint32_t  curr_idx, clean_idx;
2713
2714         curr_idx = rx_ring->lbq_curr_idx;
2715         clean_idx = rx_ring->lbq_clean_idx;
2716         while (curr_idx != clean_idx) {
2717                 lbq_desc = &rx_ring->lbq[curr_idx];
2718
2719                 if (lbq_desc->p.pg_chunk.last_flag) {
2720                         pci_unmap_page(qdev->pdev,
2721                                 lbq_desc->p.pg_chunk.map,
2722                                 ql_lbq_block_size(qdev),
2723                                        PCI_DMA_FROMDEVICE);
2724                         lbq_desc->p.pg_chunk.last_flag = 0;
2725                 }
2726
2727                 put_page(lbq_desc->p.pg_chunk.page);
2728                 lbq_desc->p.pg_chunk.page = NULL;
2729
2730                 if (++curr_idx == rx_ring->lbq_len)
2731                         curr_idx = 0;
2732
2733         }
2734 }
2735
2736 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2737 {
2738         int i;
2739         struct bq_desc *sbq_desc;
2740
2741         for (i = 0; i < rx_ring->sbq_len; i++) {
2742                 sbq_desc = &rx_ring->sbq[i];
2743                 if (sbq_desc == NULL) {
2744                         netif_err(qdev, ifup, qdev->ndev,
2745                                   "sbq_desc %d is NULL.\n", i);
2746                         return;
2747                 }
2748                 if (sbq_desc->p.skb) {
2749                         pci_unmap_single(qdev->pdev,
2750                                          pci_unmap_addr(sbq_desc, mapaddr),
2751                                          pci_unmap_len(sbq_desc, maplen),
2752                                          PCI_DMA_FROMDEVICE);
2753                         dev_kfree_skb(sbq_desc->p.skb);
2754                         sbq_desc->p.skb = NULL;
2755                 }
2756         }
2757 }
2758
2759 /* Free all large and small rx buffers associated
2760  * with the completion queues for this device.
2761  */
2762 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2763 {
2764         int i;
2765         struct rx_ring *rx_ring;
2766
2767         for (i = 0; i < qdev->rx_ring_count; i++) {
2768                 rx_ring = &qdev->rx_ring[i];
2769                 if (rx_ring->lbq)
2770                         ql_free_lbq_buffers(qdev, rx_ring);
2771                 if (rx_ring->sbq)
2772                         ql_free_sbq_buffers(qdev, rx_ring);
2773         }
2774 }
2775
2776 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2777 {
2778         struct rx_ring *rx_ring;
2779         int i;
2780
2781         for (i = 0; i < qdev->rx_ring_count; i++) {
2782                 rx_ring = &qdev->rx_ring[i];
2783                 if (rx_ring->type != TX_Q)
2784                         ql_update_buffer_queues(qdev, rx_ring);
2785         }
2786 }
2787
2788 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2789                                 struct rx_ring *rx_ring)
2790 {
2791         int i;
2792         struct bq_desc *lbq_desc;
2793         __le64 *bq = rx_ring->lbq_base;
2794
2795         memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2796         for (i = 0; i < rx_ring->lbq_len; i++) {
2797                 lbq_desc = &rx_ring->lbq[i];
2798                 memset(lbq_desc, 0, sizeof(*lbq_desc));
2799                 lbq_desc->index = i;
2800                 lbq_desc->addr = bq;
2801                 bq++;
2802         }
2803 }
2804
2805 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2806                                 struct rx_ring *rx_ring)
2807 {
2808         int i;
2809         struct bq_desc *sbq_desc;
2810         __le64 *bq = rx_ring->sbq_base;
2811
2812         memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2813         for (i = 0; i < rx_ring->sbq_len; i++) {
2814                 sbq_desc = &rx_ring->sbq[i];
2815                 memset(sbq_desc, 0, sizeof(*sbq_desc));
2816                 sbq_desc->index = i;
2817                 sbq_desc->addr = bq;
2818                 bq++;
2819         }
2820 }
2821
2822 static void ql_free_rx_resources(struct ql_adapter *qdev,
2823                                  struct rx_ring *rx_ring)
2824 {
2825         /* Free the small buffer queue. */
2826         if (rx_ring->sbq_base) {
2827                 pci_free_consistent(qdev->pdev,
2828                                     rx_ring->sbq_size,
2829                                     rx_ring->sbq_base, rx_ring->sbq_base_dma);
2830                 rx_ring->sbq_base = NULL;
2831         }
2832
2833         /* Free the small buffer queue control blocks. */
2834         kfree(rx_ring->sbq);
2835         rx_ring->sbq = NULL;
2836
2837         /* Free the large buffer queue. */
2838         if (rx_ring->lbq_base) {
2839                 pci_free_consistent(qdev->pdev,
2840                                     rx_ring->lbq_size,
2841                                     rx_ring->lbq_base, rx_ring->lbq_base_dma);
2842                 rx_ring->lbq_base = NULL;
2843         }
2844
2845         /* Free the large buffer queue control blocks. */
2846         kfree(rx_ring->lbq);
2847         rx_ring->lbq = NULL;
2848
2849         /* Free the rx queue. */
2850         if (rx_ring->cq_base) {
2851                 pci_free_consistent(qdev->pdev,
2852                                     rx_ring->cq_size,
2853                                     rx_ring->cq_base, rx_ring->cq_base_dma);
2854                 rx_ring->cq_base = NULL;
2855         }
2856 }
2857
2858 /* Allocate queues and buffers for this completion queue based
2859  * on the values in the parameter structure. */
2860 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2861                                  struct rx_ring *rx_ring)
2862 {
2863
2864         /*
2865          * Allocate the completion queue for this rx_ring.
2866          */
2867         rx_ring->cq_base =
2868             pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2869                                  &rx_ring->cq_base_dma);
2870
2871         if (rx_ring->cq_base == NULL) {
2872                 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2873                 return -ENOMEM;
2874         }
2875
2876         if (rx_ring->sbq_len) {
2877                 /*
2878                  * Allocate small buffer queue.
2879                  */
2880                 rx_ring->sbq_base =
2881                     pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2882                                          &rx_ring->sbq_base_dma);
2883
2884                 if (rx_ring->sbq_base == NULL) {
2885                         netif_err(qdev, ifup, qdev->ndev,
2886                                   "Small buffer queue allocation failed.\n");
2887                         goto err_mem;
2888                 }
2889
2890                 /*
2891                  * Allocate small buffer queue control blocks.
2892                  */
2893                 rx_ring->sbq =
2894                     kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2895                             GFP_KERNEL);
2896                 if (rx_ring->sbq == NULL) {
2897                         netif_err(qdev, ifup, qdev->ndev,
2898                                   "Small buffer queue control block allocation failed.\n");
2899                         goto err_mem;
2900                 }
2901
2902                 ql_init_sbq_ring(qdev, rx_ring);
2903         }
2904
2905         if (rx_ring->lbq_len) {
2906                 /*
2907                  * Allocate large buffer queue.
2908                  */
2909                 rx_ring->lbq_base =
2910                     pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2911                                          &rx_ring->lbq_base_dma);
2912
2913                 if (rx_ring->lbq_base == NULL) {
2914                         netif_err(qdev, ifup, qdev->ndev,
2915                                   "Large buffer queue allocation failed.\n");
2916                         goto err_mem;
2917                 }
2918                 /*
2919                  * Allocate large buffer queue control blocks.
2920                  */
2921                 rx_ring->lbq =
2922                     kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2923                             GFP_KERNEL);
2924                 if (rx_ring->lbq == NULL) {
2925                         netif_err(qdev, ifup, qdev->ndev,
2926                                   "Large buffer queue control block allocation failed.\n");
2927                         goto err_mem;
2928                 }
2929
2930                 ql_init_lbq_ring(qdev, rx_ring);
2931         }
2932
2933         return 0;
2934
2935 err_mem:
2936         ql_free_rx_resources(qdev, rx_ring);
2937         return -ENOMEM;
2938 }
2939
2940 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2941 {
2942         struct tx_ring *tx_ring;
2943         struct tx_ring_desc *tx_ring_desc;
2944         int i, j;
2945
2946         /*
2947          * Loop through all queues and free
2948          * any resources.
2949          */
2950         for (j = 0; j < qdev->tx_ring_count; j++) {
2951                 tx_ring = &qdev->tx_ring[j];
2952                 for (i = 0; i < tx_ring->wq_len; i++) {
2953                         tx_ring_desc = &tx_ring->q[i];
2954                         if (tx_ring_desc && tx_ring_desc->skb) {
2955                                 netif_err(qdev, ifdown, qdev->ndev,
2956                                           "Freeing lost SKB %p, from queue %d, index %d.\n",
2957                                           tx_ring_desc->skb, j,
2958                                           tx_ring_desc->index);
2959                                 ql_unmap_send(qdev, tx_ring_desc,
2960                                               tx_ring_desc->map_cnt);
2961                                 dev_kfree_skb(tx_ring_desc->skb);
2962                                 tx_ring_desc->skb = NULL;
2963                         }
2964                 }
2965         }
2966 }
2967
2968 static void ql_free_mem_resources(struct ql_adapter *qdev)
2969 {
2970         int i;
2971
2972         for (i = 0; i < qdev->tx_ring_count; i++)
2973                 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2974         for (i = 0; i < qdev->rx_ring_count; i++)
2975                 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2976         ql_free_shadow_space(qdev);
2977 }
2978
2979 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2980 {
2981         int i;
2982
2983         /* Allocate space for our shadow registers and such. */
2984         if (ql_alloc_shadow_space(qdev))
2985                 return -ENOMEM;
2986
2987         for (i = 0; i < qdev->rx_ring_count; i++) {
2988                 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2989                         netif_err(qdev, ifup, qdev->ndev,
2990                                   "RX resource allocation failed.\n");
2991                         goto err_mem;
2992                 }
2993         }
2994         /* Allocate tx queue resources */
2995         for (i = 0; i < qdev->tx_ring_count; i++) {
2996                 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2997                         netif_err(qdev, ifup, qdev->ndev,
2998                                   "TX resource allocation failed.\n");
2999                         goto err_mem;
3000                 }
3001         }
3002         return 0;
3003
3004 err_mem:
3005         ql_free_mem_resources(qdev);
3006         return -ENOMEM;
3007 }
3008
3009 /* Set up the rx ring control block and pass it to the chip.
3010  * The control block is defined as
3011  * "Completion Queue Initialization Control Block", or cqicb.
3012  */
3013 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3014 {
3015         struct cqicb *cqicb = &rx_ring->cqicb;
3016         void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3017                 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3018         u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3019                 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3020         void __iomem *doorbell_area =
3021             qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3022         int err = 0;
3023         u16 bq_len;
3024         u64 tmp;
3025         __le64 *base_indirect_ptr;
3026         int page_entries;
3027
3028         /* Set up the shadow registers for this ring. */
3029         rx_ring->prod_idx_sh_reg = shadow_reg;
3030         rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3031         *rx_ring->prod_idx_sh_reg = 0;
3032         shadow_reg += sizeof(u64);
3033         shadow_reg_dma += sizeof(u64);
3034         rx_ring->lbq_base_indirect = shadow_reg;
3035         rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3036         shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3037         shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3038         rx_ring->sbq_base_indirect = shadow_reg;
3039         rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3040
3041         /* PCI doorbell mem area + 0x00 for consumer index register */
3042         rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3043         rx_ring->cnsmr_idx = 0;
3044         rx_ring->curr_entry = rx_ring->cq_base;
3045
3046         /* PCI doorbell mem area + 0x04 for valid register */
3047         rx_ring->valid_db_reg = doorbell_area + 0x04;
3048
3049         /* PCI doorbell mem area + 0x18 for large buffer consumer */
3050         rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3051
3052         /* PCI doorbell mem area + 0x1c */
3053         rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3054
3055         memset((void *)cqicb, 0, sizeof(struct cqicb));
3056         cqicb->msix_vect = rx_ring->irq;
3057
3058         bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3059         cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3060
3061         cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3062
3063         cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3064
3065         /*
3066          * Set up the control block load flags.
3067          */
3068         cqicb->flags = FLAGS_LC |       /* Load queue base address */
3069             FLAGS_LV |          /* Load MSI-X vector */
3070             FLAGS_LI;           /* Load irq delay values */
3071         if (rx_ring->lbq_len) {
3072                 cqicb->flags |= FLAGS_LL;       /* Load lbq values */
3073                 tmp = (u64)rx_ring->lbq_base_dma;
3074                 base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
3075                 page_entries = 0;
3076                 do {
3077                         *base_indirect_ptr = cpu_to_le64(tmp);
3078                         tmp += DB_PAGE_SIZE;
3079                         base_indirect_ptr++;
3080                         page_entries++;
3081                 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3082                 cqicb->lbq_addr =
3083                     cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3084                 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3085                         (u16) rx_ring->lbq_buf_size;
3086                 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3087                 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3088                         (u16) rx_ring->lbq_len;
3089                 cqicb->lbq_len = cpu_to_le16(bq_len);
3090                 rx_ring->lbq_prod_idx = 0;
3091                 rx_ring->lbq_curr_idx = 0;
3092                 rx_ring->lbq_clean_idx = 0;
3093                 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3094         }
3095         if (rx_ring->sbq_len) {
3096                 cqicb->flags |= FLAGS_LS;       /* Load sbq values */
3097                 tmp = (u64)rx_ring->sbq_base_dma;
3098                 base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
3099                 page_entries = 0;
3100                 do {
3101                         *base_indirect_ptr = cpu_to_le64(tmp);
3102                         tmp += DB_PAGE_SIZE;
3103                         base_indirect_ptr++;
3104                         page_entries++;
3105                 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3106                 cqicb->sbq_addr =
3107                     cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3108                 cqicb->sbq_buf_size =
3109                     cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3110                 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3111                         (u16) rx_ring->sbq_len;
3112                 cqicb->sbq_len = cpu_to_le16(bq_len);
3113                 rx_ring->sbq_prod_idx = 0;
3114                 rx_ring->sbq_curr_idx = 0;
3115                 rx_ring->sbq_clean_idx = 0;
3116                 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3117         }
3118         switch (rx_ring->type) {
3119         case TX_Q:
3120                 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3121                 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3122                 break;
3123         case RX_Q:
3124                 /* Inbound completion handling rx_rings run in
3125                  * separate NAPI contexts.
3126                  */
3127                 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3128                                64);
3129                 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3130                 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3131                 break;
3132         default:
3133                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3134                              "Invalid rx_ring->type = %d.\n", rx_ring->type);
3135         }
3136         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3137                      "Initializing rx work queue.\n");
3138         err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3139                            CFG_LCQ, rx_ring->cq_id);
3140         if (err) {
3141                 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3142                 return err;
3143         }
3144         return err;
3145 }
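
/* The indirection lists filled in above simply slice the physically
 * contiguous buffer queues into DB_PAGE_SIZE chunks.  For example
 * (illustrative), an lbq spanning three doorbell pages gets
 *
 *      lbq_base_indirect[0] = cpu_to_le64(lbq_base_dma);
 *      lbq_base_indirect[1] = cpu_to_le64(lbq_base_dma + DB_PAGE_SIZE);
 *      lbq_base_indirect[2] = cpu_to_le64(lbq_base_dma + 2 * DB_PAGE_SIZE);
 *
 * and only lbq_base_indirect_dma is handed to the chip via the CQICB.
 */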
3146
3147 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3148 {
3149         struct wqicb *wqicb = (struct wqicb *)tx_ring;
3150         void __iomem *doorbell_area =
3151             qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3152         void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3153             (tx_ring->wq_id * sizeof(u64));
3154         u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3155             (tx_ring->wq_id * sizeof(u64));
3156         int err = 0;
3157
3158         /*
3159          * Assign doorbell registers for this tx_ring.
3160          */
3161         /* TX PCI doorbell mem area for tx producer index */
3162         tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3163         tx_ring->prod_idx = 0;
3164         /* TX PCI doorbell mem area + 0x04 */
3165         tx_ring->valid_db_reg = doorbell_area + 0x04;
3166
3167         /*
3168          * Assign shadow registers for this tx_ring.
3169          */
3170         tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3171         tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3172
3173         wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3174         wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3175                                    Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3176         wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3177         wqicb->rid = 0;
3178         wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3179
3180         wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3181
3182         ql_init_tx_ring(qdev, tx_ring);
3183
3184         err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3185                            (u16) tx_ring->wq_id);
3186         if (err) {
3187                 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3188                 return err;
3189         }
3190         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3191                      "Successfully loaded WQICB.\n");
3192         return err;
3193 }
3194
3195 static void ql_disable_msix(struct ql_adapter *qdev)
3196 {
3197         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3198                 pci_disable_msix(qdev->pdev);
3199                 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3200                 kfree(qdev->msi_x_entry);
3201                 qdev->msi_x_entry = NULL;
3202         } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3203                 pci_disable_msi(qdev->pdev);
3204                 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3205         }
3206 }
3207
3208 /* We start by trying to get the number of vectors
3209  * stored in qdev->intr_count. If we don't get that
3210  * many then we reduce the count and try again.
3211  */
3212 static void ql_enable_msix(struct ql_adapter *qdev)
3213 {
3214         int i, err;
3215
3216         /* Get the MSIX vectors. */
3217         if (qlge_irq_type == MSIX_IRQ) {
3218                 /* Try to alloc space for the msix struct,
3219                  * if it fails then go to MSI/legacy.
3220                  */
3221                 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3222                                             sizeof(struct msix_entry),
3223                                             GFP_KERNEL);
3224                 if (!qdev->msi_x_entry) {
3225                         qlge_irq_type = MSI_IRQ;
3226                         goto msi;
3227                 }
3228
3229                 for (i = 0; i < qdev->intr_count; i++)
3230                         qdev->msi_x_entry[i].entry = i;
3231
3232                 /* Loop to get our vectors.  We start with
3233                  * what we want and settle for what we get.
3234                  */
3235                 do {
3236                         err = pci_enable_msix(qdev->pdev,
3237                                 qdev->msi_x_entry, qdev->intr_count);
3238                         if (err > 0)
3239                                 qdev->intr_count = err;
3240                 } while (err > 0);
3241
3242                 if (err < 0) {
3243                         kfree(qdev->msi_x_entry);
3244                         qdev->msi_x_entry = NULL;
3245                         netif_warn(qdev, ifup, qdev->ndev,
3246                                    "MSI-X Enable failed, trying MSI.\n");
3247                         qdev->intr_count = 1;
3248                         qlge_irq_type = MSI_IRQ;
3249                 } else if (err == 0) {
3250                         set_bit(QL_MSIX_ENABLED, &qdev->flags);
3251                         netif_info(qdev, ifup, qdev->ndev,
3252                                    "MSI-X Enabled, got %d vectors.\n",
3253                                    qdev->intr_count);
3254                         return;
3255                 }
3256         }
3257 msi:
3258         qdev->intr_count = 1;
3259         if (qlge_irq_type == MSI_IRQ) {
3260                 if (!pci_enable_msi(qdev->pdev)) {
3261                         set_bit(QL_MSI_ENABLED, &qdev->flags);
3262                         netif_info(qdev, ifup, qdev->ndev,
3263                                    "Running with MSI interrupts.\n");
3264                         return;
3265                 }
3266         }
3267         qlge_irq_type = LEG_IRQ;
3268         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3269                      "Running with legacy interrupts.\n");
3270 }
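
/* Example of the vector negotiation above (illustrative): if we ask
 * for 8 vectors and pci_enable_msix() can only grant 5, it returns 5;
 * the loop retries with intr_count = 5, the second call returns 0 and
 * MSI-X comes up with 5 vectors.  A negative return falls back to MSI,
 * and failing that to a single legacy interrupt.
 */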
3271
3272 /* Each vector services 1 RSS ring and 1 or more
3273  * TX completion rings.  This function loops through
3274  * the TX completion rings and assigns the vector that
3275  * will service it.  An example would be if there are
3276  * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3277  * This would mean that vector 0 would service RSS ring 0
3278  * and TX completion rings 0,1,2 and 3.  Vector 1 would
3279  * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3280  */
3281 static void ql_set_tx_vect(struct ql_adapter *qdev)
3282 {
3283         int i, j, vect;
3284         u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3285
3286         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3287                 /* Assign irq vectors to TX rx_rings.*/
3288                 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3289                                          i < qdev->rx_ring_count; i++) {
3290                         if (j == tx_rings_per_vector) {
3291                                 vect++;
3292                                 j = 0;
3293                         }
3294                         qdev->rx_ring[i].irq = vect;
3295                         j++;
3296                 }
3297         } else {
3298                 /* For single vector all rings have an irq
3299                  * of zero.
3300                  */
3301                 for (i = 0; i < qdev->rx_ring_count; i++)
3302                         qdev->rx_ring[i].irq = 0;
3303         }
3304 }
3305
3306 /* Set the interrupt mask for this vector.  Each vector
3307  * will service 1 RSS ring and 1 or more TX completion
3308  * rings.  This function sets up a bit mask per vector
3309  * that indicates which rings it services.
3310  */
3311 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3312 {
3313         int j, vect = ctx->intr;
3314         u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3315
3316         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3317                 /* Add the RSS ring serviced by this vector
3318                  * to the mask.
3319                  */
3320                 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3321                 /* Add the TX ring(s) serviced by this vector
3322                  * to the mask. */
3323                 for (j = 0; j < tx_rings_per_vector; j++) {
3324                         ctx->irq_mask |=
3325                         (1 << qdev->rx_ring[qdev->rss_ring_count +
3326                         (vect * tx_rings_per_vector) + j].cq_id);
3327                 }
3328         } else {
3329                 /* For single vector we just shift each queue's
3330                  * ID into the mask.
3331                  */
3332                 for (j = 0; j < qdev->rx_ring_count; j++)
3333                         ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3334         }
3335 }
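
/* Worked example (assuming 2 MSI-X vectors, 8 TX completion rings and
 * cq_ids laid out as RSS rings 0-1 followed by TX completion rings
 * 2-9, so tx_rings_per_vector == 4): vector 0 gets
 *
 *      irq_mask = (1 << 0) | (1 << 2) | (1 << 3) | (1 << 4) | (1 << 5)
 *               = 0x3d
 *
 * while vector 1 covers cq_ids 1 and 6-9.
 */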
3336
3337 /*
3338  * Here we build the intr_context structures based on
3339  * our rx_ring count and intr vector count.
3340  * The intr_context structure is used to hook each vector
3341  * to possibly different handlers.
3342  */
3343 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3344 {
3345         int i = 0;
3346         struct intr_context *intr_context = &qdev->intr_context[0];
3347
3348         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3349                 /* Each rx_ring has its
3350                  * own intr_context since we have separate
3351                  * vectors for each queue.
3352                  */
3353                 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3354                         qdev->rx_ring[i].irq = i;
3355                         intr_context->intr = i;
3356                         intr_context->qdev = qdev;
3357                         /* Set up this vector's bit-mask that indicates
3358                          * which queues it services.
3359                          */
3360                         ql_set_irq_mask(qdev, intr_context);
3361                         /*
3362                          * We set up each vector's enable/disable/read bits so
3363                          * there are no bit/mask calculations in the critical path.
3364                          */
3365                         intr_context->intr_en_mask =
3366                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3367                             INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3368                             | i;
3369                         intr_context->intr_dis_mask =
3370                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3371                             INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3372                             INTR_EN_IHD | i;
3373                         intr_context->intr_read_mask =
3374                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3375                             INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3376                             i;
3377                         if (i == 0) {
3378                                 /* The first vector/queue handles
3379                                  * broadcast/multicast, fatal errors,
3380                                  * and firmware events.  This in addition
3381                                  * to normal inbound NAPI processing.
3382                                  */
3383                                 intr_context->handler = qlge_isr;
3384                                 sprintf(intr_context->name, "%s-rx-%d",
3385                                         qdev->ndev->name, i);
3386                         } else {
3387                                 /*
3388                                  * Inbound queues handle unicast frames only.
3389                                  */
3390                                 intr_context->handler = qlge_msix_rx_isr;
3391                                 sprintf(intr_context->name, "%s-rx-%d",
3392                                         qdev->ndev->name, i);
3393                         }
3394                 }
3395         } else {
3396                 /*
3397                  * All rx_rings use the same intr_context since
3398                  * there is only one vector.
3399                  */
3400                 intr_context->intr = 0;
3401                 intr_context->qdev = qdev;
3402                 /*
3403                  * We set up each vector's enable/disable/read bits so
3404                  * there are no bit/mask calculations in the critical path.
3405                  */
3406                 intr_context->intr_en_mask =
3407                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3408                 intr_context->intr_dis_mask =
3409                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3410                     INTR_EN_TYPE_DISABLE;
3411                 intr_context->intr_read_mask =
3412                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3413                 /*
3414                  * Single interrupt means one handler for all rings.
3415                  */
3416                 intr_context->handler = qlge_isr;
3417                 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3418                 /* Set up this vector's bit-mask that indicates
3419                  * which queues it services. In this case there is
3420                  * a single vector so it will service all RSS and
3421                  * TX completion rings.
3422                  */
3423                 ql_set_irq_mask(qdev, intr_context);
3424         }
3425         /* Tell the TX completion rings which MSIx vector
3426          * they will be using.
3427          */
3428         ql_set_tx_vect(qdev);
3429 }
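
/* Note on the masks built above: the vector index is simply OR'ed into
 * the low bits of the INTR_EN encoding, e.g. for vector 2 (illustrative)
 *
 *      intr_en_mask = INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
 *                     INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK |
 *                     INTR_EN_IHD | 2;
 *
 * so enabling, disabling or reading a specific vector is a single
 * register write with no per-interrupt mask arithmetic.
 */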
3430
3431 static void ql_free_irq(struct ql_adapter *qdev)
3432 {
3433         int i;
3434         struct intr_context *intr_context = &qdev->intr_context[0];
3435
3436         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3437                 if (intr_context->hooked) {
3438                         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3439                                 free_irq(qdev->msi_x_entry[i].vector,
3440                                          &qdev->rx_ring[i]);
3441                                 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3442                                              "freeing msix interrupt %d.\n", i);
3443                         } else {
3444                                 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3445                                 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3446                                              "freeing msi interrupt %d.\n", i);
3447                         }
3448                 }
3449         }
3450         ql_disable_msix(qdev);
3451 }
3452
3453 static int ql_request_irq(struct ql_adapter *qdev)
3454 {
3455         int i;
3456         int status = 0;
3457         struct pci_dev *pdev = qdev->pdev;
3458         struct intr_context *intr_context = &qdev->intr_context[0];
3459
3460         ql_resolve_queues_to_irqs(qdev);
3461
3462         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3463                 atomic_set(&intr_context->irq_cnt, 0);
3464                 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3465                         status = request_irq(qdev->msi_x_entry[i].vector,
3466                                              intr_context->handler,
3467                                              0,
3468                                              intr_context->name,
3469                                              &qdev->rx_ring[i]);
3470                         if (status) {
3471                                 netif_err(qdev, ifup, qdev->ndev,
3472                                           "Failed request for MSIX interrupt %d.\n",
3473                                           i);
3474                                 goto err_irq;
3475                         } else {
3476                                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3477                                              "Hooked intr %d, queue type %s, with name %s.\n",
3478                                              i,
3479                                              qdev->rx_ring[i].type == DEFAULT_Q ?
3480                                              "DEFAULT_Q" :
3481                                              qdev->rx_ring[i].type == TX_Q ?
3482                                              "TX_Q" :
3483                                              qdev->rx_ring[i].type == RX_Q ?
3484                                              "RX_Q" : "",
3485                                              intr_context->name);
3486                         }
3487                 } else {
3488                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3489                                      "trying msi or legacy interrupts.\n");
3490                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3491                                      "%s: irq = %d.\n", __func__, pdev->irq);
3492                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3493                                      "%s: context->name = %s.\n", __func__,
3494                                      intr_context->name);
3495                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3496                                      "%s: dev_id = 0x%p.\n", __func__,
3497                                      &qdev->rx_ring[0]);
3498                         status =
3499                             request_irq(pdev->irq, qlge_isr,
3500                                         test_bit(QL_MSI_ENABLED,
3501                                                  &qdev->
3502                                                  flags) ? 0 : IRQF_SHARED,
3503                                         intr_context->name, &qdev->rx_ring[0]);
3504                         if (status)
3505                                 goto err_irq;
3506
3507                         netif_err(qdev, ifup, qdev->ndev,
3508                                   "Hooked intr %d, queue type %s, with name %s.\n",
3509                                   i,
3510                                   qdev->rx_ring[0].type == DEFAULT_Q ?
3511                                   "DEFAULT_Q" :
3512                                   qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3513                                   qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3514                                   intr_context->name);
3515                 }
3516                 intr_context->hooked = 1;
3517         }
3518         return status;
3519 err_irq:
3520         netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!\n");
3521         ql_free_irq(qdev);
3522         return status;
3523 }
3524
3525 static int ql_start_rss(struct ql_adapter *qdev)
3526 {
3527         u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3528                                 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
3529                                 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
3530                                 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
3531                                 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
3532                                 0xbe, 0xac, 0x01, 0xfa};
3533         struct ricb *ricb = &qdev->ricb;
3534         int status = 0;
3535         int i;
3536         u8 *hash_id = (u8 *) ricb->hash_cq_id;
3537
3538         memset((void *)ricb, 0, sizeof(*ricb));
3539
3540         ricb->base_cq = RSS_L4K;
3541         ricb->flags =
3542                 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3543         ricb->mask = cpu_to_le16((u16)(0x3ff));
3544
3545         /*
3546          * Fill out the Indirection Table.
3547          */
3548         for (i = 0; i < 1024; i++)
3549                 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3550
3551         memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3552         memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3553
3554         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
3555
3556         status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3557         if (status) {
3558                 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3559                 return status;
3560         }
3561         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3562                      "Successfully loaded RICB.\n");
3563         return status;
3564 }
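
/* Example of the indirection table fill above (illustrative): with
 * rss_ring_count == 4 the 1024 hash_id[] entries cycle 0,1,2,3,0,1,...
 * since hash_id[i] = i & (4 - 1).  Note the masking only spreads the
 * load evenly when the RSS ring count is a power of two.
 */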
3565
3566 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3567 {
3568         int i, status = 0;
3569
3570         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3571         if (status)
3572                 return status;
3573         /* Clear all the entries in the routing table. */
3574         for (i = 0; i < 16; i++) {
3575                 status = ql_set_routing_reg(qdev, i, 0, 0);
3576                 if (status) {
3577                         netif_err(qdev, ifup, qdev->ndev,
3578                                   "Failed to clear routing entry.\n");
3579                         break;
3580                 }
3581         }
3582         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3583         return status;
3584 }
3585
3586 /* Initialize the frame-to-queue routing. */
3587 static int ql_route_initialize(struct ql_adapter *qdev)
3588 {
3589         int status = 0;
3590
3591         /* Clear all the entries in the routing table. */
3592         status = ql_clear_routing_entries(qdev);
3593         if (status)
3594                 return status;
3595
3596         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3597         if (status)
3598                 return status;
3599
3600         status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
3601         if (status) {
3602                 netif_err(qdev, ifup, qdev->ndev,
3603                           "Failed to init routing register for error packets.\n");
3604                 goto exit;
3605         }
3606         status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3607         if (status) {
3608                 netif_err(qdev, ifup, qdev->ndev,
3609                           "Failed to init routing register for broadcast packets.\n");
3610                 goto exit;
3611         }
3612         /* If we have more than one inbound queue, then turn on RSS in the
3613          * routing block.
3614          */
3615         if (qdev->rss_ring_count > 1) {
3616                 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3617                                         RT_IDX_RSS_MATCH, 1);
3618                 if (status) {
3619                         netif_err(qdev, ifup, qdev->ndev,
3620                                   "Failed to init routing register for MATCH RSS packets.\n");
3621                         goto exit;
3622                 }
3623         }
3624
3625         status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3626                                     RT_IDX_CAM_HIT, 1);
3627         if (status)
3628                 netif_err(qdev, ifup, qdev->ndev,
3629                           "Failed to init routing register for CAM packets.\n");
3630 exit:
3631         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3632         return status;
3633 }
3634
3635 int ql_cam_route_initialize(struct ql_adapter *qdev)
3636 {
3637         int status, set;
3638
3639         /* Check if the link is up and use that to
3640          * determine whether we are setting or clearing
3641          * the MAC address in the CAM.
3642          */
3643         set = ql_read32(qdev, STS);
3644         set &= qdev->port_link_up;
3645         status = ql_set_mac_addr(qdev, set);
3646         if (status) {
3647                 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3648                 return status;
3649         }
3650
3651         status = ql_route_initialize(qdev);
3652         if (status)
3653                 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3654
3655         return status;
3656 }
3657
3658 static int ql_adapter_initialize(struct ql_adapter *qdev)
3659 {
3660         u32 value, mask;
3661         int i;
3662         int status = 0;
3663
3664         /*
3665          * Set up the System register to halt on errors.
3666          */
3667         value = SYS_EFE | SYS_FAE;
3668         mask = value << 16;
3669         ql_write32(qdev, SYS, mask | value);
3670
3671         /* Set the default queue, and VLAN behavior. */
3672         value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3673         mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
3674         ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3675
3676         /* Set the MPI interrupt to enabled. */
3677         ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3678
3679         /* Enable the function, set pagesize, enable error checking. */
3680         value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3681             FSC_EC | FSC_VM_PAGE_4K;
3682         value |= SPLT_SETTING;
3683
3684         /* Set/clear header splitting. */
3685         mask = FSC_VM_PAGESIZE_MASK |
3686             FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3687         ql_write32(qdev, FSC, mask | value);
3688
3689         ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3690
3691         /* Set RX packet routing to use port/pci function on which the
3692          * packet arrived, in addition to the usual frame routing.
3693          * This is helpful on bonding where both interfaces can have
3694          * the same MAC address.
3695          */
3696         ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3697         /* Reroute all packets to our Interface.
3698          * They may have been routed to MPI firmware
3699          * due to WOL.
3700          */
3701         value = ql_read32(qdev, MGMT_RCV_CFG);
3702         value &= ~MGMT_RCV_CFG_RM;
3703         mask = 0xffff0000;
3704
3705         /* Sticky reg needs clearing due to WOL. */
3706         ql_write32(qdev, MGMT_RCV_CFG, mask);
3707         ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3708
3709         /* WOL is enabled by default on Mezz cards */
3710         if (qdev->pdev->subsystem_device == 0x0068 ||
3711                         qdev->pdev->subsystem_device == 0x0180)
3712                 qdev->wol = WAKE_MAGIC;
3713
3714         /* Start up the rx queues. */
3715         for (i = 0; i < qdev->rx_ring_count; i++) {
3716                 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3717                 if (status) {
3718                         netif_err(qdev, ifup, qdev->ndev,
3719                                   "Failed to start rx ring[%d].\n", i);
3720                         return status;
3721                 }
3722         }
3723
3724         /* If there is more than one inbound completion queue
3725          * then download a RICB to configure RSS.
3726          */
3727         if (qdev->rss_ring_count > 1) {
3728                 status = ql_start_rss(qdev);
3729                 if (status) {
3730                         netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3731                         return status;
3732                 }
3733         }
3734
3735         /* Start up the tx queues. */
3736         for (i = 0; i < qdev->tx_ring_count; i++) {
3737                 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3738                 if (status) {
3739                         netif_err(qdev, ifup, qdev->ndev,
3740                                   "Failed to start tx ring[%d].\n", i);
3741                         return status;
3742                 }
3743         }
3744
3745         /* Initialize the port and set the max framesize. */
3746         status = qdev->nic_ops->port_initialize(qdev);
3747         if (status)
3748                 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3749
3750         /* Set up the MAC address and frame routing filter. */
3751         status = ql_cam_route_initialize(qdev);
3752         if (status) {
3753                 netif_err(qdev, ifup, qdev->ndev,
3754                           "Failed to init CAM/Routing tables.\n");
3755                 return status;
3756         }
3757
3758         /* Start NAPI for the RSS queues. */
3759         for (i = 0; i < qdev->rss_ring_count; i++) {
3760                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3761                              "Enabling NAPI for rx_ring[%d].\n", i);
3762                 napi_enable(&qdev->rx_ring[i].napi);
3763         }
3764
3765         return status;
3766 }
3767
3768 /* Issue soft reset to chip. */
3769 static int ql_adapter_reset(struct ql_adapter *qdev)
3770 {
3771         u32 value;
3772         int status = 0;
3773         unsigned long end_jiffies;
3774
3775         /* Clear all the entries in the routing table. */
3776         status = ql_clear_routing_entries(qdev);
3777         if (status) {
3778                 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3779                 return status;
3780         }
3781
3782         end_jiffies = jiffies +
3783                 max((unsigned long)1, usecs_to_jiffies(30));
3784
3785         /* Stop management traffic. */
3786         ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3787
3788         /* Wait for the NIC and MGMNT FIFOs to empty. */
3789         ql_wait_fifo_empty(qdev);
3790
3791         ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3792
3793         do {
3794                 value = ql_read32(qdev, RST_FO);
3795                 if ((value & RST_FO_FR) == 0)
3796                         break;
3797                 cpu_relax();
3798         } while (time_before(jiffies, end_jiffies));
3799
3800         if (value & RST_FO_FR) {
3801                 netif_err(qdev, ifdown, qdev->ndev,
3802                           "Timed out (ETIMEDOUT) waiting for the chip reset to complete.\n");
3803                 status = -ETIMEDOUT;
3804         }
3805
3806         /* Resume management traffic. */
3807         ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3808         return status;
3809 }
3810
3811 static void ql_display_dev_info(struct net_device *ndev)
3812 {
3813         struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3814
3815         netif_info(qdev, probe, qdev->ndev,
3816                    "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3817                    "XG Roll = %d, XG Rev = %d.\n",
3818                    qdev->func,
3819                    qdev->port,
3820                    qdev->chip_rev_id & 0x0000000f,
3821                    qdev->chip_rev_id >> 4 & 0x0000000f,
3822                    qdev->chip_rev_id >> 8 & 0x0000000f,
3823                    qdev->chip_rev_id >> 12 & 0x0000000f);
3824         netif_info(qdev, probe, qdev->ndev,
3825                    "MAC address %pM\n", ndev->dev_addr);
3826 }
3827
3828 int ql_wol(struct ql_adapter *qdev)
3829 {
3830         int status = 0;
3831         u32 wol = MB_WOL_DISABLE;
3832
3833         /* The CAM is still intact after a reset, but if we
3834          * are doing WOL, then we may need to program the
3835          * routing regs. We would also need to issue the mailbox
3836          * commands to instruct the MPI what to do per the ethtool
3837          * settings.
3838          */
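             /* For reference only: from userspace, "ethtool -s <ifname> wol g"
              * requests WAKE_MAGIC, the one wake mode this function programs
              * (as MB_WOL_MAGIC_PKT).  Every other WAKE_* flag is rejected by
              * the check below.
              */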
3839
3840         if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3841                         WAKE_MCAST | WAKE_BCAST)) {
3842                 netif_err(qdev, ifdown, qdev->ndev,
3843                           "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3844                           qdev->wol);
3845                 return -EINVAL;
3846         }
3847
3848         if (qdev->wol & WAKE_MAGIC) {
3849                 status = ql_mb_wol_set_magic(qdev, 1);
3850                 if (status) {
3851                         netif_err(qdev, ifdown, qdev->ndev,
3852                                   "Failed to set magic packet on %s.\n",
3853                                   qdev->ndev->name);
3854                         return status;
3855                 } else
3856                         netif_info(qdev, drv, qdev->ndev,
3857                                    "Enabled magic packet successfully on %s.\n",
3858                                    qdev->ndev->name);
3859
3860                 wol |= MB_WOL_MAGIC_PKT;
3861         }
3862
3863         if (qdev->wol) {
3864                 wol |= MB_WOL_MODE_ON;
3865                 status = ql_mb_wol_mode(qdev, wol);
3866                 netif_err(qdev, drv, qdev->ndev,
3867                           "WOL %s (wol code 0x%x) on %s\n",
3868                           (status == 0) ? "Successfully set" : "Failed",
3869                           wol, qdev->ndev->name);
3870         }
3871
3872         return status;
3873 }
3874
3875 static int ql_adapter_down(struct ql_adapter *qdev)
3876 {
3877         int i, status = 0;
3878
3879         ql_link_off(qdev);
3880
3881         /* Don't kill the reset worker thread if we
3882          * are in the process of recovery.
3883          */
3884         if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3885                 cancel_delayed_work_sync(&qdev->asic_reset_work);
3886         cancel_delayed_work_sync(&qdev->mpi_reset_work);
3887         cancel_delayed_work_sync(&qdev->mpi_work);
3888         cancel_delayed_work_sync(&qdev->mpi_idc_work);
3889         cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3890         cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3891
3892         for (i = 0; i < qdev->rss_ring_count; i++)
3893                 napi_disable(&qdev->rx_ring[i].napi);
3894
3895         clear_bit(QL_ADAPTER_UP, &qdev->flags);
3896
3897         ql_disable_interrupts(qdev);
3898
3899         ql_tx_ring_clean(qdev);
3900
3901         /* Call netif_napi_del() from a common point.
3902          */
3903         for (i = 0; i < qdev->rss_ring_count; i++)
3904                 netif_napi_del(&qdev->rx_ring[i].napi);
3905
3906         ql_free_rx_buffers(qdev);
3907
3908         status = ql_adapter_reset(qdev);
3909         if (status)
3910                 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3911                           qdev->func);
3912         return status;
3913 }
3914
3915 static int ql_adapter_up(struct ql_adapter *qdev)
3916 {
3917         int err = 0;
3918
3919         err = ql_adapter_initialize(qdev);
3920         if (err) {
3921                 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3922                 goto err_init;
3923         }
3924         set_bit(QL_ADAPTER_UP, &qdev->flags);
3925         ql_alloc_rx_buffers(qdev);
3926         /* If the port is initialized and the
3927          * link is up, then turn on the carrier.
3928          */
3929         if ((ql_read32(qdev, STS) & qdev->port_init) &&
3930                         (ql_read32(qdev, STS) & qdev->port_link_up))
3931                 ql_link_on(qdev);
3932         ql_enable_interrupts(qdev);
3933         ql_enable_all_completion_interrupts(qdev);
3934         netif_tx_start_all_queues(qdev->ndev);
3935
3936         return 0;
3937 err_init:
3938         ql_adapter_reset(qdev);
3939         return err;
3940 }
3941
3942 static void ql_release_adapter_resources(struct ql_adapter *qdev)
3943 {
3944         ql_free_mem_resources(qdev);
3945         ql_free_irq(qdev);
3946 }
3947
3948 static int ql_get_adapter_resources(struct ql_adapter *qdev)
3949 {
3950         int status = 0;
3951
3952         if (ql_alloc_mem_resources(qdev)) {
3953                 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
3954                 return -ENOMEM;
3955         }
3956         status = ql_request_irq(qdev);
3957         return status;
3958 }
3959
3960 static int qlge_close(struct net_device *ndev)
3961 {
3962         struct ql_adapter *qdev = netdev_priv(ndev);
3963
3964         /* If we hit the pci_channel_io_perm_failure
3965          * condition, then we already
3966          * brought the adapter down.
3967          */
3968         if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
3969                 netif_err(qdev, drv, qdev->ndev, "EEH fatal; adapter already brought down.\n");
3970                 clear_bit(QL_EEH_FATAL, &qdev->flags);
3971                 return 0;
3972         }
3973
3974         /*
3975          * Wait for device to recover from a reset.
3976          * (Rarely happens, but possible.)
3977          */
3978         while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3979                 msleep(1);
3980         ql_adapter_down(qdev);
3981         ql_release_adapter_resources(qdev);
3982         return 0;
3983 }
3984
3985 static int ql_configure_rings(struct ql_adapter *qdev)
3986 {
3987         int i;
3988         struct rx_ring *rx_ring;
3989         struct tx_ring *tx_ring;
3990         int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
3991         unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
3992                 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
3993
3994         qdev->lbq_buf_order = get_order(lbq_buf_len);
3995
3996         /* In a perfect world we have one RSS ring for each CPU
3997          * and each has its own vector.  To do that we ask for
3998          * cpu_cnt vectors.  ql_enable_msix() will adjust the
3999          * vector count to what we actually get.  We then
4000          * allocate an RSS ring for each.
4001          * Essentially, we are doing min(cpu_count, msix_vector_count).
4002          */
4003         qdev->intr_count = cpu_cnt;
4004         ql_enable_msix(qdev);
4005         /* Adjust the RSS ring count to the actual vector count. */
4006         qdev->rss_ring_count = qdev->intr_count;
4007         qdev->tx_ring_count = cpu_cnt;
4008         qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
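             /* Worked example: with 8 online CPUs, cpu_cnt is 8; if
              * ql_enable_msix() can only get 4 MSI-X vectors it trims
              * intr_count to 4, giving rss_ring_count = 4, tx_ring_count = 8
              * and rx_ring_count = 12 (4 inbound RSS completion queues plus
              * 8 outbound-only completion queues, one per tx ring).
              */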
4009
4010         for (i = 0; i < qdev->tx_ring_count; i++) {
4011                 tx_ring = &qdev->tx_ring[i];
4012                 memset((void *)tx_ring, 0, sizeof(*tx_ring));
4013                 tx_ring->qdev = qdev;
4014                 tx_ring->wq_id = i;
4015                 tx_ring->wq_len = qdev->tx_ring_size;
4016                 tx_ring->wq_size =
4017                     tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4018
4019                 /*
4020                  * The completion queue IDs for the tx rings start
4021                  * immediately after the rss rings.
4022                  */
4023                 tx_ring->cq_id = qdev->rss_ring_count + i;
4024         }
4025
4026         for (i = 0; i < qdev->rx_ring_count; i++) {
4027                 rx_ring = &qdev->rx_ring[i];
4028                 memset((void *)rx_ring, 0, sizeof(*rx_ring));
4029                 rx_ring->qdev = qdev;
4030                 rx_ring->cq_id = i;
4031                 rx_ring->cpu = i % cpu_cnt;     /* CPU to run handler on. */
4032                 if (i < qdev->rss_ring_count) {
4033                         /*
4034                          * Inbound (RSS) queues.
4035                          */
4036                         rx_ring->cq_len = qdev->rx_ring_size;
4037                         rx_ring->cq_size =
4038                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4039                         rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4040                         rx_ring->lbq_size =
4041                             rx_ring->lbq_len * sizeof(__le64);
4042                         rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4043                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
4044                                      "lbq_buf_size %d, order = %d\n",
4045                                      rx_ring->lbq_buf_size,
4046                                      qdev->lbq_buf_order);
4047                         rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4048                         rx_ring->sbq_size =
4049                             rx_ring->sbq_len * sizeof(__le64);
4050                         rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4051                         rx_ring->type = RX_Q;
4052                 } else {
4053                         /*
4054                          * Outbound queue handles outbound completions only.
4055                          */
4056                         /* The outbound cq is the same size as the tx_ring it services. */
4057                         rx_ring->cq_len = qdev->tx_ring_size;
4058                         rx_ring->cq_size =
4059                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4060                         rx_ring->lbq_len = 0;
4061                         rx_ring->lbq_size = 0;
4062                         rx_ring->lbq_buf_size = 0;
4063                         rx_ring->sbq_len = 0;
4064                         rx_ring->sbq_size = 0;
4065                         rx_ring->sbq_buf_size = 0;
4066                         rx_ring->type = TX_Q;
4067                 }
4068         }
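             /* Resulting completion queue layout: rx_ring[0..rss_ring_count-1]
              * are the inbound RSS queues (cq_id == index), while
              * rx_ring[rss_ring_count..rx_ring_count-1] are outbound-only
              * queues whose cq_id matches the tx_ring[i].cq_id assigned above
              * (rss_ring_count + i).
              */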
4069         return 0;
4070 }
4071
4072 static int qlge_open(struct net_device *ndev)
4073 {
4074         int err = 0;
4075         struct ql_adapter *qdev = netdev_priv(ndev);
4076
4077         err = ql_adapter_reset(qdev);
4078         if (err)
4079                 return err;
4080
4081         err = ql_configure_rings(qdev);
4082         if (err)
4083                 return err;
4084
4085         err = ql_get_adapter_resources(qdev);
4086         if (err)
4087                 goto error_up;
4088
4089         err = ql_adapter_up(qdev);
4090         if (err)
4091                 goto error_up;
4092
4093         return err;
4094
4095 error_up:
4096         ql_release_adapter_resources(qdev);
4097         return err;
4098 }
4099
4100 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4101 {
4102         struct rx_ring *rx_ring;
4103         int i, status;
4104         u32 lbq_buf_len;
4105
4106         /* Wait for an outstanding reset to complete. */
4107         if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4108                 int i = 4;
4109                 while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4110                         netif_err(qdev, ifup, qdev->ndev,
4111                                   "Waiting for adapter UP...\n");
4112                         ssleep(1);
4113                 }
4114
4115                 if (!i) {
4116                         netif_err(qdev, ifup, qdev->ndev,
4117                                   "Timed out waiting for adapter UP\n");
4118                         return -ETIMEDOUT;
4119                 }
4120         }
4121
4122         status = ql_adapter_down(qdev);
4123         if (status)
4124                 goto error;
4125
4126         /* Get the new rx buffer size. */
4127         lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4128                 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4129         qdev->lbq_buf_order = get_order(lbq_buf_len);
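             /* Example, assuming a 4 KiB PAGE_SIZE: a jumbo-MTU large buffer
              * bigger than one page maps to allocation order 1 (two contiguous
              * pages), while the standard-MTU size fits in order 0.  The exact
              * byte sizes come from LARGE_BUFFER_MAX_SIZE/MIN_SIZE in qlge.h.
              */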
4130
4131         for (i = 0; i < qdev->rss_ring_count; i++) {
4132                 rx_ring = &qdev->rx_ring[i];
4133                 /* Set the new size. */
4134                 rx_ring->lbq_buf_size = lbq_buf_len;
4135         }
4136
4137         status = ql_adapter_up(qdev);
4138         if (status)
4139                 goto error;
4140
4141         return status;
4142 error:
4143         netif_alert(qdev, ifup, qdev->ndev,
4144                     "Driver up/down cycle failed, closing device.\n");
4145         set_bit(QL_ADAPTER_UP, &qdev->flags);
4146         dev_close(qdev->ndev);
4147         return status;
4148 }
4149
4150 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4151 {
4152         struct ql_adapter *qdev = netdev_priv(ndev);
4153         int status;
4154
4155         if (ndev->mtu == 1500 && new_mtu == 9000) {
4156                 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4157         } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4158                 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4159         } else
4160                 return -EINVAL;
4161
4162         queue_delayed_work(qdev->workqueue,
4163                         &qdev->mpi_port_cfg_work, 3*HZ);
4164
4165         ndev->mtu = new_mtu;
4166
4167         if (!netif_running(qdev->ndev)) {
4168                 return 0;
4169         }
4170
4171         status = ql_change_rx_buffers(qdev);
4172         if (status) {
4173                 netif_err(qdev, ifup, qdev->ndev,
4174                           "Changing MTU failed.\n");
4175         }
4176
4177         return status;
4178 }
4179
4180 static struct net_device_stats *qlge_get_stats(struct net_device
4181                                                *ndev)
4182 {
4183         struct ql_adapter *qdev = netdev_priv(ndev);
4184         struct rx_ring *rx_ring = &qdev->rx_ring[0];
4185         struct tx_ring *tx_ring = &qdev->tx_ring[0];
4186         unsigned long pkts, mcast, dropped, errors, bytes;
4187         int i;
4188
4189         /* Get RX stats. */
4190         pkts = mcast = dropped = errors = bytes = 0;
4191         for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4192                 pkts += rx_ring->rx_packets;
4193                 bytes += rx_ring->rx_bytes;
4194                 dropped += rx_ring->rx_dropped;
4195                 errors += rx_ring->rx_errors;
4196                 mcast += rx_ring->rx_multicast;
4197         }
4198         ndev->stats.rx_packets = pkts;
4199         ndev->stats.rx_bytes = bytes;
4200         ndev->stats.rx_dropped = dropped;
4201         ndev->stats.rx_errors = errors;
4202         ndev->stats.multicast = mcast;
4203
4204         /* Get TX stats. */
4205         pkts = errors = bytes = 0;
4206         for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4207                 pkts += tx_ring->tx_packets;
4208                 bytes += tx_ring->tx_bytes;
4209                 errors += tx_ring->tx_errors;
4210         }
4211         ndev->stats.tx_packets = pkts;
4212         ndev->stats.tx_bytes = bytes;
4213         ndev->stats.tx_errors = errors;
4214         return &ndev->stats;
4215 }
4216
4217 static void qlge_set_multicast_list(struct net_device *ndev)
4218 {
4219         struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4220         struct dev_mc_list *mc_ptr;
4221         int i, status;
4222
4223         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4224         if (status)
4225                 return;
4226         /*
4227          * Set or clear promiscuous mode if a
4228          * transition is taking place.
4229          */
4230         if (ndev->flags & IFF_PROMISC) {
4231                 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4232                         if (ql_set_routing_reg
4233                             (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4234                                 netif_err(qdev, hw, qdev->ndev,
4235                                           "Failed to set promiscuous mode.\n");
4236                         } else {
4237                                 set_bit(QL_PROMISCUOUS, &qdev->flags);
4238                         }
4239                 }
4240         } else {
4241                 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4242                         if (ql_set_routing_reg
4243                             (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4244                                 netif_err(qdev, hw, qdev->ndev,
4245                                           "Failed to clear promiscuous mode.\n");
4246                         } else {
4247                                 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4248                         }
4249                 }
4250         }
4251
4252         /*
4253          * Set or clear all multicast mode if a
4254          * transition is taking place.
4255          */
4256         if ((ndev->flags & IFF_ALLMULTI) ||
4257             (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4258                 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4259                         if (ql_set_routing_reg
4260                             (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4261                                 netif_err(qdev, hw, qdev->ndev,
4262                                           "Failed to set all-multi mode.\n");
4263                         } else {
4264                                 set_bit(QL_ALLMULTI, &qdev->flags);
4265                         }
4266                 }
4267         } else {
4268                 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4269                         if (ql_set_routing_reg
4270                             (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4271                                 netif_err(qdev, hw, qdev->ndev,
4272                                           "Failed to clear all-multi mode.\n");
4273                         } else {
4274                                 clear_bit(QL_ALLMULTI, &qdev->flags);
4275                         }
4276                 }
4277         }
4278
4279         if (!netdev_mc_empty(ndev)) {
4280                 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4281                 if (status)
4282                         goto exit;
4283                 for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
4284                      i++, mc_ptr = mc_ptr->next)
4285                         if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
4286                                                 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4287                                 netif_err(qdev, hw, qdev->ndev,
4288                                           "Failed to load multicast address.\n");
4289                                 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4290                                 goto exit;
4291                         }
4292                 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4293                 if (ql_set_routing_reg
4294                     (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4295                         netif_err(qdev, hw, qdev->ndev,
4296                                   "Failed to set multicast match mode.\n");
4297                 } else {
4298                         set_bit(QL_ALLMULTI, &qdev->flags);
4299                 }
4300         }
4301 exit:
4302         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4303 }
4304
4305 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4306 {
4307         struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4308         struct sockaddr *addr = p;
4309         int status;
4310
4311         if (!is_valid_ether_addr(addr->sa_data))
4312                 return -EADDRNOTAVAIL;
4313         memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4314
4315         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4316         if (status)
4317                 return status;
4318         status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4319                         MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4320         if (status)
4321                 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4322         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4323         return status;
4324 }
4325
4326 static void qlge_tx_timeout(struct net_device *ndev)
4327 {
4328         struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4329         ql_queue_asic_error(qdev);
4330 }
4331
4332 static void ql_asic_reset_work(struct work_struct *work)
4333 {
4334         struct ql_adapter *qdev =
4335             container_of(work, struct ql_adapter, asic_reset_work.work);
4336         int status;
4337         rtnl_lock();
4338         status = ql_adapter_down(qdev);
4339         if (status)
4340                 goto error;
4341
4342         status = ql_adapter_up(qdev);
4343         if (status)
4344                 goto error;
4345
4346         /* Restore rx mode. */
4347         clear_bit(QL_ALLMULTI, &qdev->flags);
4348         clear_bit(QL_PROMISCUOUS, &qdev->flags);
4349         qlge_set_multicast_list(qdev->ndev);
4350
4351         rtnl_unlock();
4352         return;
4353 error:
4354         netif_alert(qdev, ifup, qdev->ndev,
4355                     "Driver up/down cycle failed, closing device\n");
4356
4357         set_bit(QL_ADAPTER_UP, &qdev->flags);
4358         dev_close(qdev->ndev);
4359         rtnl_unlock();
4360 }
4361
4362 static struct nic_operations qla8012_nic_ops = {
4363         .get_flash              = ql_get_8012_flash_params,
4364         .port_initialize        = ql_8012_port_initialize,
4365 };
4366
4367 static struct nic_operations qla8000_nic_ops = {
4368         .get_flash              = ql_get_8000_flash_params,
4369         .port_initialize        = ql_8000_port_initialize,
4370 };
4371
4372 /* Find the pcie function number for the other NIC
4373  * on this chip.  Since both NIC functions share a
4374  * common firmware we have the lowest enabled function
4375  * do any common work.  Examples would be resetting
4376  * after a fatal firmware error, or doing a firmware
4377  * coredump.
4378  */
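     /* Worked example: if MPI_TEST_FUNC_PORT_CFG reports NIC functions 1 and 2
      * and this instance is running on function 1, alt_func becomes 2 (and vice
      * versa); a function number that matches neither field is treated as an
      * error (-EIO).
      */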
4379 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4380 {
4381         int status = 0;
4382         u32 temp;
4383         u32 nic_func1, nic_func2;
4384
4385         status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4386                         &temp);
4387         if (status)
4388                 return status;
4389
4390         nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4391                         MPI_TEST_NIC_FUNC_MASK);
4392         nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4393                         MPI_TEST_NIC_FUNC_MASK);
4394
4395         if (qdev->func == nic_func1)
4396                 qdev->alt_func = nic_func2;
4397         else if (qdev->func == nic_func2)
4398                 qdev->alt_func = nic_func1;
4399         else
4400                 status = -EIO;
4401
4402         return status;
4403 }
4404
4405 static int ql_get_board_info(struct ql_adapter *qdev)
4406 {
4407         int status;
4408         qdev->func =
4409             (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4410         if (qdev->func > 3)
4411                 return -EIO;
4412
4413         status = ql_get_alt_pcie_func(qdev);
4414         if (status)
4415                 return status;
4416
4417         qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4418         if (qdev->port) {
4419                 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4420                 qdev->port_link_up = STS_PL1;
4421                 qdev->port_init = STS_PI1;
4422                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4423                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4424         } else {
4425                 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4426                 qdev->port_link_up = STS_PL0;
4427                 qdev->port_init = STS_PI0;
4428                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4429                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4430         }
4431         qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4432         qdev->device_id = qdev->pdev->device;
4433         if (qdev->device_id == QLGE_DEVICE_ID_8012)
4434                 qdev->nic_ops = &qla8012_nic_ops;
4435         else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4436                 qdev->nic_ops = &qla8000_nic_ops;
4437         return status;
4438 }
4439
4440 static void ql_release_all(struct pci_dev *pdev)
4441 {
4442         struct net_device *ndev = pci_get_drvdata(pdev);
4443         struct ql_adapter *qdev = netdev_priv(ndev);
4444
4445         if (qdev->workqueue) {
4446                 destroy_workqueue(qdev->workqueue);
4447                 qdev->workqueue = NULL;
4448         }
4449
4450         if (qdev->reg_base)
4451                 iounmap(qdev->reg_base);
4452         if (qdev->doorbell_area)
4453                 iounmap(qdev->doorbell_area);
4454         vfree(qdev->mpi_coredump);
4455         pci_release_regions(pdev);
4456         pci_set_drvdata(pdev, NULL);
4457 }
4458
4459 static int __devinit ql_init_device(struct pci_dev *pdev,
4460                                     struct net_device *ndev, int cards_found)
4461 {
4462         struct ql_adapter *qdev = netdev_priv(ndev);
4463         int err = 0;
4464
4465         memset((void *)qdev, 0, sizeof(*qdev));
4466         err = pci_enable_device(pdev);
4467         if (err) {
4468                 dev_err(&pdev->dev, "PCI device enable failed.\n");
4469                 return err;
4470         }
4471
4472         qdev->ndev = ndev;
4473         qdev->pdev = pdev;
4474         pci_set_drvdata(pdev, ndev);
4475
4476         /* Set PCIe read request size */
4477         err = pcie_set_readrq(pdev, 4096);
4478         if (err) {
4479                 dev_err(&pdev->dev, "Set readrq failed.\n");
4480                 goto err_out1;
4481         }
4482
4483         err = pci_request_regions(pdev, DRV_NAME);
4484         if (err) {
4485                 dev_err(&pdev->dev, "PCI region request failed.\n");
4486                 goto err_out1;
4487         }
4488
4489         pci_set_master(pdev);
4490         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4491                 set_bit(QL_DMA64, &qdev->flags);
4492                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4493         } else {
4494                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4495                 if (!err)
4496                         err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4497         }
4498
4499         if (err) {
4500                 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4501                 goto err_out2;
4502         }
4503
4504         /* Set PCIe reset type for EEH to fundamental. */
4505         pdev->needs_freset = 1;
4506         pci_save_state(pdev);
4507         qdev->reg_base =
4508             ioremap_nocache(pci_resource_start(pdev, 1),
4509                             pci_resource_len(pdev, 1));
4510         if (!qdev->reg_base) {
4511                 dev_err(&pdev->dev, "Register mapping failed.\n");
4512                 err = -ENOMEM;
4513                 goto err_out2;
4514         }
4515
4516         qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4517         qdev->doorbell_area =
4518             ioremap_nocache(pci_resource_start(pdev, 3),
4519                             pci_resource_len(pdev, 3));
4520         if (!qdev->doorbell_area) {
4521                 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4522                 err = -ENOMEM;
4523                 goto err_out2;
4524         }
4525
4526         err = ql_get_board_info(qdev);
4527         if (err) {
4528                 dev_err(&pdev->dev, "Register access failed.\n");
4529                 err = -EIO;
4530                 goto err_out2;
4531         }
4532         qdev->msg_enable = netif_msg_init(debug, default_msg);
4533         spin_lock_init(&qdev->hw_lock);
4534         spin_lock_init(&qdev->stats_lock);
4535
4536         if (qlge_mpi_coredump) {
4537                 qdev->mpi_coredump =
4538                         vmalloc(sizeof(struct ql_mpi_coredump));
4539                 if (qdev->mpi_coredump == NULL) {
4540                         dev_err(&pdev->dev, "Coredump alloc failed.\n");
4541                         err = -ENOMEM;
4542                         goto err_out2;
4543                 }
4544                 if (qlge_force_coredump)
4545                         set_bit(QL_FRC_COREDUMP, &qdev->flags);
4546         }
4547         /* make sure the EEPROM is good */
4548         err = qdev->nic_ops->get_flash(qdev);
4549         if (err) {
4550                 dev_err(&pdev->dev, "Invalid FLASH.\n");
4551                 goto err_out2;
4552         }
4553
4554         memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4555
4556         /* Set up the default ring sizes. */
4557         qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4558         qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4559
4560         /* Set up the coalescing parameters. */
4561         qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4562         qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4563         qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4564         qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4565
4566         /*
4567          * Set up the operating parameters.
4568          */
4569         qdev->rx_csum = 1;
4570         qdev->workqueue = create_singlethread_workqueue(ndev->name);
4571         INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4572         INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4573         INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4574         INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4575         INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4576         INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4577         init_completion(&qdev->ide_completion);
4578
4579         if (!cards_found) {
4580                 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4581                 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4582                          DRV_NAME, DRV_VERSION);
4583         }
4584         return 0;
4585 err_out2:
4586         ql_release_all(pdev);
4587 err_out1:
4588         pci_disable_device(pdev);
4589         return err;
4590 }
4591
4592 static const struct net_device_ops qlge_netdev_ops = {
4593         .ndo_open               = qlge_open,
4594         .ndo_stop               = qlge_close,
4595         .ndo_start_xmit         = qlge_send,
4596         .ndo_change_mtu         = qlge_change_mtu,
4597         .ndo_get_stats          = qlge_get_stats,
4598         .ndo_set_multicast_list = qlge_set_multicast_list,
4599         .ndo_set_mac_address    = qlge_set_mac_address,
4600         .ndo_validate_addr      = eth_validate_addr,
4601         .ndo_tx_timeout         = qlge_tx_timeout,
4602         .ndo_vlan_rx_register   = qlge_vlan_rx_register,
4603         .ndo_vlan_rx_add_vid    = qlge_vlan_rx_add_vid,
4604         .ndo_vlan_rx_kill_vid   = qlge_vlan_rx_kill_vid,
4605 };
4606
4607 static void ql_timer(unsigned long data)
4608 {
4609         struct ql_adapter *qdev = (struct ql_adapter *)data;
4610         u32 var = 0;
4611
4612         var = ql_read32(qdev, STS);
4613         if (pci_channel_offline(qdev->pdev)) {
4614                 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4615                 return;
4616         }
4617
4618         qdev->timer.expires = jiffies + (5*HZ);
4619         add_timer(&qdev->timer);
4620 }
4621
4622 static int __devinit qlge_probe(struct pci_dev *pdev,
4623                                 const struct pci_device_id *pci_entry)
4624 {
4625         struct net_device *ndev = NULL;
4626         struct ql_adapter *qdev = NULL;
4627         static int cards_found;
4628         int err = 0;
4629
4630         ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4631                         min(MAX_CPUS, (int)num_online_cpus()));
4632         if (!ndev)
4633                 return -ENOMEM;
4634
4635         err = ql_init_device(pdev, ndev, cards_found);
4636         if (err < 0) {
4637                 free_netdev(ndev);
4638                 return err;
4639         }
4640
4641         qdev = netdev_priv(ndev);
4642         SET_NETDEV_DEV(ndev, &pdev->dev);
4643         ndev->features = (0
4644                           | NETIF_F_IP_CSUM
4645                           | NETIF_F_SG
4646                           | NETIF_F_TSO
4647                           | NETIF_F_TSO6
4648                           | NETIF_F_TSO_ECN
4649                           | NETIF_F_HW_VLAN_TX
4650                           | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
4651         ndev->features |= NETIF_F_GRO;
4652
4653         if (test_bit(QL_DMA64, &qdev->flags))
4654                 ndev->features |= NETIF_F_HIGHDMA;
4655
4656         /*
4657          * Set up net_device structure.
4658          */
4659         ndev->tx_queue_len = qdev->tx_ring_size;
4660         ndev->irq = pdev->irq;
4661
4662         ndev->netdev_ops = &qlge_netdev_ops;
4663         SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
4664         ndev->watchdog_timeo = 10 * HZ;
4665
4666         err = register_netdev(ndev);
4667         if (err) {
4668                 dev_err(&pdev->dev, "net device registration failed.\n");
4669                 ql_release_all(pdev);
4670                 pci_disable_device(pdev);
4671                 return err;
4672         }
4673         /* Start up the timer to trigger EEH if
4674          * the bus goes dead
4675          */
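             /* ql_timer() above re-arms itself every 5 seconds; its periodic
              * ql_read32(STS) is what trips EEH detection when the PCI channel
              * dies, and the timer stops re-arming once pci_channel_offline()
              * reports the failure.
              */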
4676         init_timer_deferrable(&qdev->timer);
4677         qdev->timer.data = (unsigned long)qdev;
4678         qdev->timer.function = ql_timer;
4679         qdev->timer.expires = jiffies + (5*HZ);
4680         add_timer(&qdev->timer);
4681         ql_link_off(qdev);
4682         ql_display_dev_info(ndev);
4683         atomic_set(&qdev->lb_count, 0);
4684         cards_found++;
4685         return 0;
4686 }
4687
4688 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4689 {
4690         return qlge_send(skb, ndev);
4691 }
4692
4693 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4694 {
4695         return ql_clean_inbound_rx_ring(rx_ring, budget);
4696 }
4697
4698 static void __devexit qlge_remove(struct pci_dev *pdev)
4699 {
4700         struct net_device *ndev = pci_get_drvdata(pdev);
4701         struct ql_adapter *qdev = netdev_priv(ndev);
4702         del_timer_sync(&qdev->timer);
4703         unregister_netdev(ndev);
4704         ql_release_all(pdev);
4705         pci_disable_device(pdev);
4706         free_netdev(ndev);
4707 }
4708
4709 /* Clean up resources without touching hardware. */
4710 static void ql_eeh_close(struct net_device *ndev)
4711 {
4712         int i;
4713         struct ql_adapter *qdev = netdev_priv(ndev);
4714
4715         if (netif_carrier_ok(ndev)) {
4716                 netif_carrier_off(ndev);
4717                 netif_stop_queue(ndev);
4718         }
4719
4720         if (test_bit(QL_ADAPTER_UP, &qdev->flags))
4721                 cancel_delayed_work_sync(&qdev->asic_reset_work);
4722         cancel_delayed_work_sync(&qdev->mpi_reset_work);
4723         cancel_delayed_work_sync(&qdev->mpi_work);
4724         cancel_delayed_work_sync(&qdev->mpi_idc_work);
4725         cancel_delayed_work_sync(&qdev->mpi_core_to_log);
4726         cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
4727
4728         for (i = 0; i < qdev->rss_ring_count; i++)
4729                 netif_napi_del(&qdev->rx_ring[i].napi);
4730
4731         clear_bit(QL_ADAPTER_UP, &qdev->flags);
4732         ql_tx_ring_clean(qdev);
4733         ql_free_rx_buffers(qdev);
4734         ql_release_adapter_resources(qdev);
4735 }
4736
4737 /*
4738  * This callback is called by the PCI subsystem whenever
4739  * a PCI bus error is detected.
4740  */
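     /* Recovery sequence (standard PCI error handling): error_detected() runs
      * first and returns whether a slot reset is needed; slot_reset() then
      * re-enables and resets the device; resume() finally re-opens the
      * interface if it was running.  The three hooks are wired up in
      * qlge_err_handler below.
      */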
4741 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4742                                                enum pci_channel_state state)
4743 {
4744         struct net_device *ndev = pci_get_drvdata(pdev);
4745         struct ql_adapter *qdev = netdev_priv(ndev);
4746
4747         switch (state) {
4748         case pci_channel_io_normal:
4749                 return PCI_ERS_RESULT_CAN_RECOVER;
4750         case pci_channel_io_frozen:
4751                 netif_device_detach(ndev);
4752                 if (netif_running(ndev))
4753                         ql_eeh_close(ndev);
4754                 pci_disable_device(pdev);
4755                 return PCI_ERS_RESULT_NEED_RESET;
4756         case pci_channel_io_perm_failure:
4757                 dev_err(&pdev->dev,
4758                         "%s: pci_channel_io_perm_failure.\n", __func__);
4759                 ql_eeh_close(ndev);
4760                 set_bit(QL_EEH_FATAL, &qdev->flags);
4761                 return PCI_ERS_RESULT_DISCONNECT;
4762         }
4763
4764         /* Request a slot reset. */
4765         return PCI_ERS_RESULT_NEED_RESET;
4766 }
4767
4768 /*
4769  * This callback is called after the PCI bus has been reset.
4770  * Basically, this tries to restart the card from scratch.
4771  * This is a shortened version of the device probe/discovery code;
4772  * it resembles the first half of the () routine.
4773  */
4774 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4775 {
4776         struct net_device *ndev = pci_get_drvdata(pdev);
4777         struct ql_adapter *qdev = netdev_priv(ndev);
4778
4779         pdev->error_state = pci_channel_io_normal;
4780
4781         pci_restore_state(pdev);
4782         if (pci_enable_device(pdev)) {
4783                 netif_err(qdev, ifup, qdev->ndev,
4784                           "Cannot re-enable PCI device after reset.\n");
4785                 return PCI_ERS_RESULT_DISCONNECT;
4786         }
4787         pci_set_master(pdev);
4788
4789         if (ql_adapter_reset(qdev)) {
4790                 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4791                 set_bit(QL_EEH_FATAL, &qdev->flags);
4792                 return PCI_ERS_RESULT_DISCONNECT;
4793         }
4794
4795         return PCI_ERS_RESULT_RECOVERED;
4796 }
4797
4798 static void qlge_io_resume(struct pci_dev *pdev)
4799 {
4800         struct net_device *ndev = pci_get_drvdata(pdev);
4801         struct ql_adapter *qdev = netdev_priv(ndev);
4802         int err = 0;
4803
4804         if (netif_running(ndev)) {
4805                 err = qlge_open(ndev);
4806                 if (err) {
4807                         netif_err(qdev, ifup, qdev->ndev,
4808                                   "Device initialization failed after reset.\n");
4809                         return;
4810                 }
4811         } else {
4812                 netif_err(qdev, ifup, qdev->ndev,
4813                           "Device was not running prior to EEH.\n");
4814         }
4815         qdev->timer.expires = jiffies + (5*HZ);
4816         add_timer(&qdev->timer);
4817         netif_device_attach(ndev);
4818 }
4819
4820 static struct pci_error_handlers qlge_err_handler = {
4821         .error_detected = qlge_io_error_detected,
4822         .slot_reset = qlge_io_slot_reset,
4823         .resume = qlge_io_resume,
4824 };
4825
4826 static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4827 {
4828         struct net_device *ndev = pci_get_drvdata(pdev);
4829         struct ql_adapter *qdev = netdev_priv(ndev);
4830         int err;
4831
4832         netif_device_detach(ndev);
4833         del_timer_sync(&qdev->timer);
4834
4835         if (netif_running(ndev)) {
4836                 err = ql_adapter_down(qdev);
4837                 if (err)
4838                         return err;
4839         }
4840
4841         ql_wol(qdev);
4842         err = pci_save_state(pdev);
4843         if (err)
4844                 return err;
4845
4846         pci_disable_device(pdev);
4847
4848         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4849
4850         return 0;
4851 }
4852
4853 #ifdef CONFIG_PM
4854 static int qlge_resume(struct pci_dev *pdev)
4855 {
4856         struct net_device *ndev = pci_get_drvdata(pdev);
4857         struct ql_adapter *qdev = netdev_priv(ndev);
4858         int err;
4859
4860         pci_set_power_state(pdev, PCI_D0);
4861         pci_restore_state(pdev);
4862         err = pci_enable_device(pdev);
4863         if (err) {
4864                 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
4865                 return err;
4866         }
4867         pci_set_master(pdev);
4868
4869         pci_enable_wake(pdev, PCI_D3hot, 0);
4870         pci_enable_wake(pdev, PCI_D3cold, 0);
4871
4872         if (netif_running(ndev)) {
4873                 err = ql_adapter_up(qdev);
4874                 if (err)
4875                         return err;
4876         }
4877
4878         qdev->timer.expires = jiffies + (5*HZ);
4879         add_timer(&qdev->timer);
4880         netif_device_attach(ndev);
4881
4882         return 0;
4883 }
4884 #endif /* CONFIG_PM */
4885
4886 static void qlge_shutdown(struct pci_dev *pdev)
4887 {
4888         qlge_suspend(pdev, PMSG_SUSPEND);
4889 }
4890
4891 static struct pci_driver qlge_driver = {
4892         .name = DRV_NAME,
4893         .id_table = qlge_pci_tbl,
4894         .probe = qlge_probe,
4895         .remove = __devexit_p(qlge_remove),
4896 #ifdef CONFIG_PM
4897         .suspend = qlge_suspend,
4898         .resume = qlge_resume,
4899 #endif
4900         .shutdown = qlge_shutdown,
4901         .err_handler = &qlge_err_handler
4902 };
4903
4904 static int __init qlge_init_module(void)
4905 {
4906         return pci_register_driver(&qlge_driver);
4907 }
4908
4909 static void __exit qlge_exit(void)
4910 {
4911         pci_unregister_driver(&qlge_driver);
4912 }
4913
4914 module_init(qlge_init_module);
4915 module_exit(qlge_exit);