/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author:     Linux qlge network device driver by
 *             Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <net/ip6_checksum.h>

#include "qlge.h"
char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
/* NETIF_MSG_TX_QUEUED | */
/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = 0x00007fff;	/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
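/*
 * Note: "debug" is a bitmask of NETIF_MSG_* flags from <linux/netdevice.h>,
 * not a 0-16 level.  For example (illustrative only), loading the module
 * with "modprobe qlge debug=0x0007" enables just the DRV (0x0001),
 * PROBE (0x0002) and LINK (0x0004) message classes.
 */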
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		"Option to enable MPI firmware dump. "
		"Default is OFF - Do not allocate memory.");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		"Option to allow force of firmware core dump. "
		"Default is OFF - Do not allow.");
static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}
int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;

	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}
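/*
 * Typical usage pattern for the semaphore helpers above (sketch, matching
 * how the flash routines below use them): take the lock around any access
 * to the shared resource and release it on every exit path, e.g.
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	... access the flash ...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 */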
/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread APIs such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
164 int count = UDELAY_COUNT;
167 temp = ql_read32(qdev, reg);
169 /* check for errors */
170 if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
180 netif_alert(qdev, probe, qdev->ndev,
181 "Timed out waiting for reg %x to come ready.\n", reg);
/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
190 int count = UDELAY_COUNT;
194 temp = ql_read32(qdev, CFG);
199 udelay(UDELAY_DELAY);
/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
219 (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
222 map = pci_map_single(qdev->pdev, ptr, size, direction);
223 if (pci_dma_mapping_error(qdev->pdev, map)) {
224 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
228 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
232 status = ql_wait_cfg(qdev, bit);
234 netif_err(qdev, ifup, qdev->ndev,
235 "Timed out waiting for CFG to come ready.\n");
239 ql_write32(qdev, ICB_L, (u32) map);
240 ql_write32(qdev, ICB_H, (u32) (map >> 32));
242 mask = CFG_Q_MASK | (bit << 16);
243 value = bit | (q_id << CFG_Q_SHIFT);
244 ql_write32(qdev, CFG, (mask | value));
	/*
	 * Wait for the bit to clear after signaling hw.
	 */
249 status = ql_wait_cfg(qdev, bit);
251 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
252 pci_unmap_single(qdev->pdev, map, size, direction);
256 /* Get a specific MAC address from the CAM. Used for debug and reg dump. */
257 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
264 case MAC_ADDR_TYPE_MULTI_MAC:
265 case MAC_ADDR_TYPE_CAM_MAC:
268 ql_wait_reg_rdy(qdev,
269 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
272 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
273 (index << MAC_ADDR_IDX_SHIFT) | /* index */
274 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
276 ql_wait_reg_rdy(qdev,
277 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
280 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
282 ql_wait_reg_rdy(qdev,
283 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
286 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
287 (index << MAC_ADDR_IDX_SHIFT) | /* index */
288 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
290 ql_wait_reg_rdy(qdev,
291 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
294 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
295 if (type == MAC_ADDR_TYPE_CAM_MAC) {
297 ql_wait_reg_rdy(qdev,
298 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
301 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
302 (index << MAC_ADDR_IDX_SHIFT) | /* index */
303 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
305 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
309 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
313 case MAC_ADDR_TYPE_VLAN:
314 case MAC_ADDR_TYPE_MULTI_FLTR:
316 netif_crit(qdev, ifup, qdev->ndev,
317 "Address type %d not yet supported.\n", type);
/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
334 case MAC_ADDR_TYPE_MULTI_MAC:
336 u32 upper = (addr[0] << 8) | addr[1];
337 u32 lower = (addr[2] << 24) | (addr[3] << 16) |
338 (addr[4] << 8) | (addr[5]);
341 ql_wait_reg_rdy(qdev,
342 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
345 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
346 (index << MAC_ADDR_IDX_SHIFT) |
348 ql_write32(qdev, MAC_ADDR_DATA, lower);
350 ql_wait_reg_rdy(qdev,
351 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
354 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
355 (index << MAC_ADDR_IDX_SHIFT) |
358 ql_write32(qdev, MAC_ADDR_DATA, upper);
360 ql_wait_reg_rdy(qdev,
361 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
366 case MAC_ADDR_TYPE_CAM_MAC:
369 u32 upper = (addr[0] << 8) | addr[1];
371 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
374 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
375 "Adding %s address %pM at index %d in the CAM.\n",
376 type == MAC_ADDR_TYPE_MULTI_MAC ?
377 "MULTICAST" : "UNICAST",
381 ql_wait_reg_rdy(qdev,
382 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
385 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
386 (index << MAC_ADDR_IDX_SHIFT) | /* index */
388 ql_write32(qdev, MAC_ADDR_DATA, lower);
390 ql_wait_reg_rdy(qdev,
391 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
394 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
395 (index << MAC_ADDR_IDX_SHIFT) | /* index */
397 ql_write32(qdev, MAC_ADDR_DATA, upper);
399 ql_wait_reg_rdy(qdev,
400 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
403 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
404 (index << MAC_ADDR_IDX_SHIFT) | /* index */
			/* This field should also include the queue id
			   and possibly the function id.  Right now we hardcode
			   the route field to NIC core.
			 */
410 cam_output = (CAM_OUT_ROUTE_NIC |
412 func << CAM_OUT_FUNC_SHIFT) |
413 (0 << CAM_OUT_CQ_ID_SHIFT));
415 cam_output |= CAM_OUT_RV;
416 /* route to NIC core */
417 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
420 case MAC_ADDR_TYPE_VLAN:
422 u32 enable_bit = *((u32 *) &addr[0]);
		/* For VLAN, the addr actually holds a bit that
		 * either enables or disables the vlan id we are
		 * addressing. It's either MAC_ADDR_E on or off.
		 * That's bit-27 we're talking about.
		 */
428 netif_info(qdev, ifup, qdev->ndev,
429 "%s VLAN ID %d %s the CAM.\n",
430 enable_bit ? "Adding" : "Removing",
432 enable_bit ? "to" : "from");
435 ql_wait_reg_rdy(qdev,
436 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
439 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
440 (index << MAC_ADDR_IDX_SHIFT) | /* index */
442 enable_bit); /* enable/disable */
445 case MAC_ADDR_TYPE_MULTI_FLTR:
447 netif_crit(qdev, ifup, qdev->ndev,
448 "Address type %d not yet supported.\n", type);
/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
462 char zero_mac_addr[ETH_ALEN];
466 addr = &qdev->ndev->dev_addr[0];
467 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
468 "Set Mac addr %pM\n", addr);
470 memset(zero_mac_addr, 0, ETH_ALEN);
471 addr = &zero_mac_addr[0];
472 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
473 "Clearing MAC address\n");
475 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
478 status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
479 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
480 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
482 netif_err(qdev, ifup, qdev->ndev,
483 "Failed to init mac address.\n");
487 void ql_link_on(struct ql_adapter *qdev)
489 netif_err(qdev, link, qdev->ndev, "Link is up.\n");
490 netif_carrier_on(qdev->ndev);
491 ql_set_mac_addr(qdev, 1);
494 void ql_link_off(struct ql_adapter *qdev)
496 netif_err(qdev, link, qdev->ndev, "Link is down.\n");
497 netif_carrier_off(qdev->ndev);
498 ql_set_mac_addr(qdev, 0);
/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
508 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
512 ql_write32(qdev, RT_IDX,
513 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
514 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
517 *value = ql_read32(qdev, RT_DATA);
/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
530 int status = -EINVAL; /* Return error if no mask match. */
533 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
534 "%s %s mask %s the routing reg.\n",
535 enable ? "Adding" : "Removing",
536 index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
537 index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
538 index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
539 index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
540 index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
541 index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
542 index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
543 index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
544 index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
545 index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
546 index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
547 index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
548 index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
549 index == RT_IDX_UNUSED013 ? "UNUSED13" :
550 index == RT_IDX_UNUSED014 ? "UNUSED14" :
551 index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
552 "(Bad index != RT_IDX)",
553 enable ? "to" : "from");
558 value = RT_IDX_DST_CAM_Q | /* dest */
559 RT_IDX_TYPE_NICQ | /* type */
560 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
563 case RT_IDX_VALID: /* Promiscuous Mode frames. */
565 value = RT_IDX_DST_DFLT_Q | /* dest */
566 RT_IDX_TYPE_NICQ | /* type */
567 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
570 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
572 value = RT_IDX_DST_DFLT_Q | /* dest */
573 RT_IDX_TYPE_NICQ | /* type */
574 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
577 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
579 value = RT_IDX_DST_DFLT_Q | /* dest */
580 RT_IDX_TYPE_NICQ | /* type */
581 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
584 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
586 value = RT_IDX_DST_DFLT_Q | /* dest */
587 RT_IDX_TYPE_NICQ | /* type */
588 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
591 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
593 value = RT_IDX_DST_DFLT_Q | /* dest */
594 RT_IDX_TYPE_NICQ | /* type */
595 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
598 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
600 value = RT_IDX_DST_RSS | /* dest */
601 RT_IDX_TYPE_NICQ | /* type */
602 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
605 case 0: /* Clear the E-bit on an entry. */
607 value = RT_IDX_DST_DFLT_Q | /* dest */
608 RT_IDX_TYPE_NICQ | /* type */
609 (index << RT_IDX_IDX_SHIFT);/* index */
613 netif_err(qdev, ifup, qdev->ndev,
614 "Mask type %d not yet supported.\n", mask);
620 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
623 value |= (enable ? RT_IDX_E : 0);
624 ql_write32(qdev, RT_IDX, value);
625 ql_write32(qdev, RT_DATA, enable ? mask : 0);
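/* Note (descriptive): RT_DATA holds the match condition (the "mask"
 * argument) while RT_IDX selects the slot and carries the enable bit, so
 * clearing a slot simply writes RT_IDX without RT_IDX_E and zeroes RT_DATA.
 */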
631 static void ql_enable_interrupts(struct ql_adapter *qdev)
633 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
636 static void ql_disable_interrupts(struct ql_adapter *qdev)
638 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
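/* Note (descriptive): like several registers on this chip, INTR_EN takes a
 * write mask in the upper 16 bits, so "(INTR_EN_EI << 16) | INTR_EN_EI"
 * sets the EI bit while "(INTR_EN_EI << 16)" alone clears it.
 */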
/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
650 unsigned long hw_flags = 0;
651 struct intr_context *ctx = qdev->intr_context + intr;
653 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSI-X multi interrupts and
		 * it's not the default (zeroth) interrupt.
		 */
657 ql_write32(qdev, INTR_EN,
659 var = ql_read32(qdev, STS);
663 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
664 if (atomic_dec_and_test(&ctx->irq_cnt)) {
665 ql_write32(qdev, INTR_EN,
667 var = ql_read32(qdev, STS);
669 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
673 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
676 struct intr_context *ctx;
	/* HW disables for us if we're MSI-X multi interrupts and
	 * it's not the default (zeroth) interrupt.
	 */
681 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
684 ctx = qdev->intr_context + intr;
685 spin_lock(&qdev->hw_lock);
686 if (!atomic_read(&ctx->irq_cnt)) {
687 ql_write32(qdev, INTR_EN,
689 var = ql_read32(qdev, STS);
691 atomic_inc(&ctx->irq_cnt);
692 spin_unlock(&qdev->hw_lock);
696 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
699 for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
704 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
706 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
707 ql_enable_completion_interrupt(qdev, i);
712 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
716 __le16 *flash = (__le16 *)&qdev->flash;
718 status = strncmp((char *)&qdev->flash, str, 4);
720 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
724 for (i = 0; i < size; i++)
725 csum += le16_to_cpu(*flash++);
728 netif_err(qdev, ifup, qdev->ndev,
729 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
734 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
737 /* wait for reg to come ready */
738 status = ql_wait_reg_rdy(qdev,
739 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
742 /* set up for reg read */
743 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
744 /* wait for reg to come ready */
745 status = ql_wait_reg_rdy(qdev,
746 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
758 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
762 __le32 *p = (__le32 *)&qdev->flash;
766 /* Get flash offset for function and adjust
770 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
772 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
774 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
777 size = sizeof(struct flash_params_8000) / sizeof(u32);
778 for (i = 0; i < size; i++, p++) {
779 status = ql_read_flash_word(qdev, i+offset, p);
781 netif_err(qdev, ifup, qdev->ndev,
782 "Error reading flash.\n");
787 status = ql_validate_flash(qdev,
788 sizeof(struct flash_params_8000) / sizeof(u16),
791 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
799 if (qdev->flash.flash_params_8000.data_type1 == 2)
801 qdev->flash.flash_params_8000.mac_addr1,
802 qdev->ndev->addr_len);
805 qdev->flash.flash_params_8000.mac_addr,
806 qdev->ndev->addr_len);
808 if (!is_valid_ether_addr(mac_addr)) {
809 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
814 memcpy(qdev->ndev->dev_addr,
816 qdev->ndev->addr_len);
819 ql_sem_unlock(qdev, SEM_FLASH_MASK);
823 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
827 __le32 *p = (__le32 *)&qdev->flash;
829 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
831 /* Second function's parameters follow the first
837 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
840 for (i = 0; i < size; i++, p++) {
841 status = ql_read_flash_word(qdev, i+offset, p);
843 netif_err(qdev, ifup, qdev->ndev,
844 "Error reading flash.\n");
850 status = ql_validate_flash(qdev,
851 sizeof(struct flash_params_8012) / sizeof(u16),
854 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
859 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
864 memcpy(qdev->ndev->dev_addr,
865 qdev->flash.flash_params_8012.mac_addr,
866 qdev->ndev->addr_len);
869 ql_sem_unlock(qdev, SEM_FLASH_MASK);
/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
880 /* wait for reg to come ready */
881 status = ql_wait_reg_rdy(qdev,
882 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
885 /* write the data to the data reg */
886 ql_write32(qdev, XGMAC_DATA, data);
887 /* trigger the write */
888 ql_write32(qdev, XGMAC_ADDR, reg);
/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
899 /* wait for reg to come ready */
900 status = ql_wait_reg_rdy(qdev,
901 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
904 /* set up for reg read */
905 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
906 /* wait for reg to come ready */
907 status = ql_wait_reg_rdy(qdev,
908 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
912 *data = ql_read32(qdev, XGMAC_DATA);
917 /* This is used for reading the 64-bit statistics regs. */
918 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
924 status = ql_read_xgmac_reg(qdev, reg, &lo);
928 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
932 *data = (u64) lo | ((u64) hi << 32);
938 static int ql_8000_port_initialize(struct ql_adapter *qdev)
942 * Get MPI firmware version for driver banner
945 status = ql_mb_about_fw(qdev);
948 status = ql_mb_get_fw_state(qdev);
951 /* Wake up a worker to get/set the TX/RX frame sizes. */
952 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
968 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
972 netif_info(qdev, link, qdev->ndev,
973 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
974 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
976 netif_crit(qdev, link, qdev->ndev,
977 "Port initialize timed out.\n");
	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
983 /* Set the core reset. */
984 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
987 data |= GLOBAL_CFG_RESET;
988 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
992 /* Clear the core reset and turn on jumbo for receiver. */
993 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
994 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
995 data |= GLOBAL_CFG_TX_STAT_EN;
996 data |= GLOBAL_CFG_RX_STAT_EN;
997 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	/* Enable transmitter, and clear its reset. */
1002 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
1005 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
1006 data |= TX_CFG_EN; /* Enable the transmitter. */
1007 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	/* Enable receiver and clear its reset. */
1012 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1015 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
1016 data |= RX_CFG_EN; /* Enable the receiver. */
1017 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1021 /* Turn on jumbo. */
1023 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1027 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1031 /* Signal to the world that the port is enabled. */
1032 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1034 ql_sem_unlock(qdev, qdev->xg_sem_mask);
1038 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1040 return PAGE_SIZE << qdev->lbq_buf_order;
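/* Note (descriptive): large receive buffers are carved out of one compound
 * page of ql_lbq_block_size() bytes.  Each lbq descriptor gets an
 * lbq_buf_size-sized chunk of it, and the page is only unmapped once its
 * last chunk has been consumed (see ql_get_curr_lchunk() and
 * ql_get_next_chunk() below).
 */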
1043 /* Get the next large buffer. */
1044 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1046 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1047 rx_ring->lbq_curr_idx++;
1048 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1049 rx_ring->lbq_curr_idx = 0;
1050 rx_ring->lbq_free_cnt++;
1054 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1055 struct rx_ring *rx_ring)
1057 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1059 pci_dma_sync_single_for_cpu(qdev->pdev,
1060 pci_unmap_addr(lbq_desc, mapaddr),
1061 rx_ring->lbq_buf_size,
1062 PCI_DMA_FROMDEVICE);
	/* If it's the last chunk of our master page then
	 * we're done and we can unmap it.
	 */
1067 if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1068 == ql_lbq_block_size(qdev))
1069 pci_unmap_page(qdev->pdev,
1070 lbq_desc->p.pg_chunk.map,
1071 ql_lbq_block_size(qdev),
1072 PCI_DMA_FROMDEVICE);
1076 /* Get the next small buffer. */
1077 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1079 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1080 rx_ring->sbq_curr_idx++;
1081 if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1082 rx_ring->sbq_curr_idx = 0;
1083 rx_ring->sbq_free_cnt++;
1087 /* Update an rx ring index. */
1088 static void ql_update_cq(struct rx_ring *rx_ring)
1090 rx_ring->cnsmr_idx++;
1091 rx_ring->curr_entry++;
1092 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1093 rx_ring->cnsmr_idx = 0;
1094 rx_ring->curr_entry = rx_ring->cq_base;
1098 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1100 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1103 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1104 struct bq_desc *lbq_desc)
1106 if (!rx_ring->pg_chunk.page) {
1108 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1110 qdev->lbq_buf_order);
1111 if (unlikely(!rx_ring->pg_chunk.page)) {
1112 netif_err(qdev, drv, qdev->ndev,
1113 "page allocation failed.\n");
1116 rx_ring->pg_chunk.offset = 0;
1117 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1118 0, ql_lbq_block_size(qdev),
1119 PCI_DMA_FROMDEVICE);
1120 if (pci_dma_mapping_error(qdev->pdev, map)) {
1121 __free_pages(rx_ring->pg_chunk.page,
1122 qdev->lbq_buf_order);
1123 netif_err(qdev, drv, qdev->ndev,
1124 "PCI mapping failed.\n");
1127 rx_ring->pg_chunk.map = map;
1128 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
	/* Copy the current master pg_chunk info
	 * to the current descriptor.
	 */
	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1136 /* Adjust the master page chunk for next
1139 rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1140 if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1141 rx_ring->pg_chunk.page = NULL;
1142 lbq_desc->p.pg_chunk.last_flag = 1;
1144 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1145 get_page(rx_ring->pg_chunk.page);
1146 lbq_desc->p.pg_chunk.last_flag = 0;
1150 /* Process (refill) a large buffer queue. */
1151 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1153 u32 clean_idx = rx_ring->lbq_clean_idx;
1154 u32 start_idx = clean_idx;
1155 struct bq_desc *lbq_desc;
1159 while (rx_ring->lbq_free_cnt > 32) {
1160 for (i = 0; i < 16; i++) {
1161 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1162 "lbq: try cleaning clean_idx = %d.\n",
1164 lbq_desc = &rx_ring->lbq[clean_idx];
1165 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1166 netif_err(qdev, ifup, qdev->ndev,
1167 "Could not get a page chunk.\n");
1171 map = lbq_desc->p.pg_chunk.map +
1172 lbq_desc->p.pg_chunk.offset;
1173 pci_unmap_addr_set(lbq_desc, mapaddr, map);
1174 pci_unmap_len_set(lbq_desc, maplen,
1175 rx_ring->lbq_buf_size);
1176 *lbq_desc->addr = cpu_to_le64(map);
1178 pci_dma_sync_single_for_device(qdev->pdev, map,
1179 rx_ring->lbq_buf_size,
1180 PCI_DMA_FROMDEVICE);
1182 if (clean_idx == rx_ring->lbq_len)
1186 rx_ring->lbq_clean_idx = clean_idx;
1187 rx_ring->lbq_prod_idx += 16;
1188 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1189 rx_ring->lbq_prod_idx = 0;
1190 rx_ring->lbq_free_cnt -= 16;
1193 if (start_idx != clean_idx) {
1194 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1195 "lbq: updating prod idx = %d.\n",
1196 rx_ring->lbq_prod_idx);
1197 ql_write_db_reg(rx_ring->lbq_prod_idx,
1198 rx_ring->lbq_prod_idx_db_reg);
1202 /* Process (refill) a small buffer queue. */
1203 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1205 u32 clean_idx = rx_ring->sbq_clean_idx;
1206 u32 start_idx = clean_idx;
1207 struct bq_desc *sbq_desc;
1211 while (rx_ring->sbq_free_cnt > 16) {
1212 for (i = 0; i < 16; i++) {
1213 sbq_desc = &rx_ring->sbq[clean_idx];
1214 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1215 "sbq: try cleaning clean_idx = %d.\n",
1217 if (sbq_desc->p.skb == NULL) {
1218 netif_printk(qdev, rx_status, KERN_DEBUG,
1220 "sbq: getting new skb for index %d.\n",
1223 netdev_alloc_skb(qdev->ndev,
1225 if (sbq_desc->p.skb == NULL) {
1226 netif_err(qdev, probe, qdev->ndev,
1227 "Couldn't get an skb.\n");
1228 rx_ring->sbq_clean_idx = clean_idx;
1231 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1232 map = pci_map_single(qdev->pdev,
1233 sbq_desc->p.skb->data,
1234 rx_ring->sbq_buf_size,
1235 PCI_DMA_FROMDEVICE);
1236 if (pci_dma_mapping_error(qdev->pdev, map)) {
1237 netif_err(qdev, ifup, qdev->ndev,
1238 "PCI mapping failed.\n");
1239 rx_ring->sbq_clean_idx = clean_idx;
1240 dev_kfree_skb_any(sbq_desc->p.skb);
1241 sbq_desc->p.skb = NULL;
1244 pci_unmap_addr_set(sbq_desc, mapaddr, map);
1245 pci_unmap_len_set(sbq_desc, maplen,
1246 rx_ring->sbq_buf_size);
1247 *sbq_desc->addr = cpu_to_le64(map);
1251 if (clean_idx == rx_ring->sbq_len)
1254 rx_ring->sbq_clean_idx = clean_idx;
1255 rx_ring->sbq_prod_idx += 16;
1256 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1257 rx_ring->sbq_prod_idx = 0;
1258 rx_ring->sbq_free_cnt -= 16;
1261 if (start_idx != clean_idx) {
1262 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1263 "sbq: updating prod idx = %d.\n",
1264 rx_ring->sbq_prod_idx);
1265 ql_write_db_reg(rx_ring->sbq_prod_idx,
1266 rx_ring->sbq_prod_idx_db_reg);
1270 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1271 struct rx_ring *rx_ring)
1273 ql_update_sbq(qdev, rx_ring);
1274 ql_update_lbq(qdev, rx_ring);
/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
1281 struct tx_ring_desc *tx_ring_desc, int mapped)
1284 for (i = 0; i < mapped; i++) {
1285 if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
1296 netif_printk(qdev, tx_done, KERN_DEBUG,
1298 "unmapping OAL area.\n");
1300 pci_unmap_single(qdev->pdev,
1301 pci_unmap_addr(&tx_ring_desc->map[i],
1303 pci_unmap_len(&tx_ring_desc->map[i],
1307 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1308 "unmapping frag %d.\n", i);
1309 pci_unmap_page(qdev->pdev,
1310 pci_unmap_addr(&tx_ring_desc->map[i],
1312 pci_unmap_len(&tx_ring_desc->map[i],
1313 maplen), PCI_DMA_TODEVICE);
/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
1323 struct ob_mac_iocb_req *mac_iocb_ptr,
1324 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1326 int len = skb_headlen(skb);
1328 int frag_idx, err, map_idx = 0;
1329 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1330 int frag_cnt = skb_shinfo(skb)->nr_frags;
1333 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1334 "frag_cnt = %d.\n", frag_cnt);
1337 * Map the skb buffer first.
1339 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1341 err = pci_dma_mapping_error(qdev->pdev, map);
1343 netif_err(qdev, tx_queued, qdev->ndev,
1344 "PCI mapping failed with error: %d\n", err);
1346 return NETDEV_TX_BUSY;
1349 tbd->len = cpu_to_le32(len);
1350 tbd->addr = cpu_to_le64(map);
1351 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1352 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in that list.
	 */
1362 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1363 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1365 if (frag_idx == 6 && frag_cnt > 7) {
1366 /* Let's tack on an sglist.
1367 * Our control block will now
1369 * iocb->seg[0] = skb->data
1370 * iocb->seg[1] = frag[0]
1371 * iocb->seg[2] = frag[1]
1372 * iocb->seg[3] = frag[2]
1373 * iocb->seg[4] = frag[3]
1374 * iocb->seg[5] = frag[4]
1375 * iocb->seg[6] = frag[5]
1376 * iocb->seg[7] = ptr to OAL (external sglist)
1377 * oal->seg[0] = frag[6]
1378 * oal->seg[1] = frag[7]
1379 * oal->seg[2] = frag[8]
1380 * oal->seg[3] = frag[9]
1381 * oal->seg[4] = frag[10]
1384 /* Tack on the OAL in the eighth segment of IOCB. */
1385 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1388 err = pci_dma_mapping_error(qdev->pdev, map);
1390 netif_err(qdev, tx_queued, qdev->ndev,
1391 "PCI mapping outbound address list with error: %d\n",
1396 tbd->addr = cpu_to_le64(map);
1398 * The length is the number of fragments
1399 * that remain to be mapped times the length
1400 * of our sglist (OAL).
1403 cpu_to_le32((sizeof(struct tx_buf_desc) *
1404 (frag_cnt - frag_idx)) | TX_DESC_C);
1405 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1407 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1408 sizeof(struct oal));
1409 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1414 pci_map_page(qdev->pdev, frag->page,
1415 frag->page_offset, frag->size,
1418 err = pci_dma_mapping_error(qdev->pdev, map);
1420 netif_err(qdev, tx_queued, qdev->ndev,
1421 "PCI mapping frags failed with error: %d.\n",
1426 tbd->addr = cpu_to_le64(map);
1427 tbd->len = cpu_to_le32(frag->size);
1428 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1429 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1433 /* Save the number of segments we've mapped. */
1434 tx_ring_desc->map_cnt = map_idx;
1435 /* Terminate the last segment. */
1436 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1437 return NETDEV_TX_OK;
	/*
	 * If the first frag mapping failed, then i will be zero.
	 * This causes the unmap of the skb->data area.  Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
1447 return NETDEV_TX_BUSY;
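/* Note (descriptive): the IOCB itself holds eight tx_buf_desc entries.  The
 * skb head plus up to seven frags fit directly; with more than seven frags,
 * everything from the seventh frag onward spills into the per-descriptor
 * OAL that ql_map_send() maps as the eighth entry.
 */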
1450 /* Process an inbound completion from an rx ring. */
1451 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1452 struct rx_ring *rx_ring,
1453 struct ib_mac_iocb_rsp *ib_mac_rsp,
1457 struct sk_buff *skb;
1458 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1459 struct skb_frag_struct *rx_frag;
1461 struct napi_struct *napi = &rx_ring->napi;
1463 napi->dev = qdev->ndev;
1465 skb = napi_get_frags(napi);
1467 netif_err(qdev, drv, qdev->ndev,
1468 "Couldn't get an skb, exiting.\n");
1469 rx_ring->rx_dropped++;
1470 put_page(lbq_desc->p.pg_chunk.page);
1473 prefetch(lbq_desc->p.pg_chunk.va);
1474 rx_frag = skb_shinfo(skb)->frags;
1475 nr_frags = skb_shinfo(skb)->nr_frags;
1476 rx_frag += nr_frags;
1477 rx_frag->page = lbq_desc->p.pg_chunk.page;
1478 rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
1479 rx_frag->size = length;
1482 skb->data_len += length;
1483 skb->truesize += length;
1484 skb_shinfo(skb)->nr_frags++;
1486 rx_ring->rx_packets++;
1487 rx_ring->rx_bytes += length;
1488 skb->ip_summed = CHECKSUM_UNNECESSARY;
1489 skb_record_rx_queue(skb, rx_ring->cq_id);
1490 if (qdev->vlgrp && (vlan_id != 0xffff))
1491 vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
1493 napi_gro_frags(napi);
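/* Note (descriptive): the GRO path above never builds a private skb; the
 * page chunk is attached straight to the skb obtained from
 * napi_get_frags(), so the page reference taken in ql_get_next_chunk() is
 * handed off to the stack here.
 */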
1496 /* Process an inbound completion from an rx ring. */
1497 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1498 struct rx_ring *rx_ring,
1499 struct ib_mac_iocb_rsp *ib_mac_rsp,
1503 struct net_device *ndev = qdev->ndev;
1504 struct sk_buff *skb = NULL;
1506 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1507 struct napi_struct *napi = &rx_ring->napi;
1509 skb = netdev_alloc_skb(ndev, length);
		netif_err(qdev, drv, qdev->ndev,
			  "Couldn't get an skb, need to unwind!\n");
1513 rx_ring->rx_dropped++;
1514 put_page(lbq_desc->p.pg_chunk.page);
1518 addr = lbq_desc->p.pg_chunk.va;
1522 /* Frame error, so drop the packet. */
1523 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1524 netif_err(qdev, drv, qdev->ndev,
1525 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1526 rx_ring->rx_errors++;
	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
1533 if (skb->len > ndev->mtu + ETH_HLEN) {
1534 netif_err(qdev, drv, qdev->ndev,
1535 "Segment too small, dropping.\n");
1536 rx_ring->rx_dropped++;
1539 memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1540 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1541 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1543 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1544 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1546 skb->len += length-ETH_HLEN;
1547 skb->data_len += length-ETH_HLEN;
1548 skb->truesize += length-ETH_HLEN;
1550 rx_ring->rx_packets++;
1551 rx_ring->rx_bytes += skb->len;
1552 skb->protocol = eth_type_trans(skb, ndev);
1553 skb->ip_summed = CHECKSUM_NONE;
1555 if (qdev->rx_csum &&
1556 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1558 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1559 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1560 "TCP checksum done!\n");
1561 skb->ip_summed = CHECKSUM_UNNECESSARY;
1562 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1563 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1564 /* Unfragmented ipv4 UDP frame. */
1565 struct iphdr *iph = (struct iphdr *) skb->data;
1566 if (!(iph->frag_off &
1567 cpu_to_be16(IP_MF|IP_OFFSET))) {
1568 skb->ip_summed = CHECKSUM_UNNECESSARY;
1569 netif_printk(qdev, rx_status, KERN_DEBUG,
1571 "TCP checksum done!\n");
1576 skb_record_rx_queue(skb, rx_ring->cq_id);
1577 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1578 if (qdev->vlgrp && (vlan_id != 0xffff))
1579 vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
1581 napi_gro_receive(napi, skb);
1583 if (qdev->vlgrp && (vlan_id != 0xffff))
1584 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1586 netif_receive_skb(skb);
1590 dev_kfree_skb_any(skb);
1591 put_page(lbq_desc->p.pg_chunk.page);
1594 /* Process an inbound completion from an rx ring. */
1595 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1596 struct rx_ring *rx_ring,
1597 struct ib_mac_iocb_rsp *ib_mac_rsp,
1601 struct net_device *ndev = qdev->ndev;
1602 struct sk_buff *skb = NULL;
1603 struct sk_buff *new_skb = NULL;
1604 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1606 skb = sbq_desc->p.skb;
1607 /* Allocate new_skb and copy */
1608 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1609 if (new_skb == NULL) {
1610 netif_err(qdev, probe, qdev->ndev,
1611 "No skb available, drop the packet.\n");
1612 rx_ring->rx_dropped++;
1615 skb_reserve(new_skb, NET_IP_ALIGN);
1616 memcpy(skb_put(new_skb, length), skb->data, length);
1619 /* Frame error, so drop the packet. */
1620 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1621 netif_err(qdev, drv, qdev->ndev,
1622 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1623 dev_kfree_skb_any(skb);
1624 rx_ring->rx_errors++;
1628 /* loopback self test for ethtool */
1629 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1630 ql_check_lb_frame(qdev, skb);
1631 dev_kfree_skb_any(skb);
	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
1638 if (skb->len > ndev->mtu + ETH_HLEN) {
1639 dev_kfree_skb_any(skb);
1640 rx_ring->rx_dropped++;
1644 prefetch(skb->data);
1646 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1647 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1649 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1650 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1651 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1652 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1653 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1654 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1656 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1657 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1658 "Promiscuous Packet.\n");
1660 rx_ring->rx_packets++;
1661 rx_ring->rx_bytes += skb->len;
1662 skb->protocol = eth_type_trans(skb, ndev);
1663 skb->ip_summed = CHECKSUM_NONE;
	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
1668 if (qdev->rx_csum &&
1669 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1671 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1672 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1673 "TCP checksum done!\n");
1674 skb->ip_summed = CHECKSUM_UNNECESSARY;
1675 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1676 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1677 /* Unfragmented ipv4 UDP frame. */
1678 struct iphdr *iph = (struct iphdr *) skb->data;
1679 if (!(iph->frag_off &
1680 cpu_to_be16(IP_MF|IP_OFFSET))) {
1681 skb->ip_summed = CHECKSUM_UNNECESSARY;
1682 netif_printk(qdev, rx_status, KERN_DEBUG,
1684 "TCP checksum done!\n");
1689 skb_record_rx_queue(skb, rx_ring->cq_id);
1690 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1691 if (qdev->vlgrp && (vlan_id != 0xffff))
1692 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
1695 napi_gro_receive(&rx_ring->napi, skb);
1697 if (qdev->vlgrp && (vlan_id != 0xffff))
1698 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1700 netif_receive_skb(skb);
1704 static void ql_realign_skb(struct sk_buff *skb, int len)
1706 void *temp_addr = skb->data;
	/* Undo the skb_reserve(skb,32) we did before
	 * giving to hardware, and realign data on
	 * a 2-byte boundary.
	 */
1712 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1713 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1714 skb_copy_to_linear_data(skb, temp_addr,
/*
 * This function builds an skb for the given inbound
 * completion.  It will be rewritten for readability in the near
 * future, but for now it works well.
 */
1723 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1724 struct rx_ring *rx_ring,
1725 struct ib_mac_iocb_rsp *ib_mac_rsp)
1727 struct bq_desc *lbq_desc;
1728 struct bq_desc *sbq_desc;
1729 struct sk_buff *skb = NULL;
1730 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1731 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1734 * Handle the header buffer if present.
1736 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1737 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1738 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1739 "Header of %d bytes in small buffer.\n", hdr_len);
1741 * Headers fit nicely into a small buffer.
1743 sbq_desc = ql_get_curr_sbuf(rx_ring);
1744 pci_unmap_single(qdev->pdev,
1745 pci_unmap_addr(sbq_desc, mapaddr),
1746 pci_unmap_len(sbq_desc, maplen),
1747 PCI_DMA_FROMDEVICE);
1748 skb = sbq_desc->p.skb;
1749 ql_realign_skb(skb, hdr_len);
1750 skb_put(skb, hdr_len);
1751 sbq_desc->p.skb = NULL;
1755 * Handle the data buffer(s).
1757 if (unlikely(!length)) { /* Is there data too? */
1758 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1759 "No Data buffer in this packet.\n");
1763 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1764 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1765 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1766 "Headers in small, data of %d bytes in small, combine them.\n",
			/*
			 * Data is less than small buffer size so it's
			 * stuffed in a small buffer.
			 * For this case we append the data
			 * from the "data" small buffer to the "header" small
			 * buffer.
			 */
1775 sbq_desc = ql_get_curr_sbuf(rx_ring);
1776 pci_dma_sync_single_for_cpu(qdev->pdev,
1778 (sbq_desc, mapaddr),
1781 PCI_DMA_FROMDEVICE);
1782 memcpy(skb_put(skb, length),
1783 sbq_desc->p.skb->data, length);
1784 pci_dma_sync_single_for_device(qdev->pdev,
1791 PCI_DMA_FROMDEVICE);
1793 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1794 "%d bytes in a single small buffer.\n",
1796 sbq_desc = ql_get_curr_sbuf(rx_ring);
1797 skb = sbq_desc->p.skb;
1798 ql_realign_skb(skb, length);
1799 skb_put(skb, length);
1800 pci_unmap_single(qdev->pdev,
1801 pci_unmap_addr(sbq_desc,
1803 pci_unmap_len(sbq_desc,
1805 PCI_DMA_FROMDEVICE);
1806 sbq_desc->p.skb = NULL;
1808 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1809 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1810 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1811 "Header in small, %d bytes in large. Chain large to small!\n",
1814 * The data is in a single large buffer. We
1815 * chain it to the header buffer's skb and let
1818 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1819 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1820 "Chaining page at offset = %d, for %d bytes to skb.\n",
1821 lbq_desc->p.pg_chunk.offset, length);
1822 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1823 lbq_desc->p.pg_chunk.offset,
1826 skb->data_len += length;
1827 skb->truesize += length;
		/*
		 * The headers and data are in a single large buffer.  We
		 * copy it to a new skb and let it go.  This can happen with
		 * jumbo mtu on a non-TCP/UDP frame.
		 */
1834 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1835 skb = netdev_alloc_skb(qdev->ndev, length);
1837 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1838 "No skb available, drop the packet.\n");
1841 pci_unmap_page(qdev->pdev,
1842 pci_unmap_addr(lbq_desc,
1844 pci_unmap_len(lbq_desc, maplen),
1845 PCI_DMA_FROMDEVICE);
1846 skb_reserve(skb, NET_IP_ALIGN);
1847 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1848 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1850 skb_fill_page_desc(skb, 0,
1851 lbq_desc->p.pg_chunk.page,
1852 lbq_desc->p.pg_chunk.offset,
1855 skb->data_len += length;
1856 skb->truesize += length;
1858 __pskb_pull_tail(skb,
1859 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1860 VLAN_ETH_HLEN : ETH_HLEN);
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer.  We loop
		 * through and chain them to our small header
		 * buffer's skb.
		 * frags:  There are 18 max frags and our small
		 * buffer will hold 32 of them. The thing is,
		 * we'll use 3 max for our 9000 byte jumbo
		 * frames.  If the MTU goes up we could
		 * eventually be in trouble.
		 */
1875 sbq_desc = ql_get_curr_sbuf(rx_ring);
1876 pci_unmap_single(qdev->pdev,
1877 pci_unmap_addr(sbq_desc, mapaddr),
1878 pci_unmap_len(sbq_desc, maplen),
1879 PCI_DMA_FROMDEVICE);
1880 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is a non TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer.  We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs.  Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
1890 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1891 "%d bytes of headers & data in chain of large.\n",
1893 skb = sbq_desc->p.skb;
1894 sbq_desc->p.skb = NULL;
1895 skb_reserve(skb, NET_IP_ALIGN);
1897 while (length > 0) {
1898 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1899 size = (length < rx_ring->lbq_buf_size) ? length :
1900 rx_ring->lbq_buf_size;
1902 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1903 "Adding page %d to skb for %d bytes.\n",
1905 skb_fill_page_desc(skb, i,
1906 lbq_desc->p.pg_chunk.page,
1907 lbq_desc->p.pg_chunk.offset,
1910 skb->data_len += size;
1911 skb->truesize += size;
1915 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1916 VLAN_ETH_HLEN : ETH_HLEN);
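/* Note (descriptive): in the chained-buffer cases above only the Ethernet
 * (or VLAN) header is pulled into the skb's linear area; the payload stays
 * in page frags, keeping the copy cost constant regardless of frame size.
 */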
1921 /* Process an inbound completion from an rx ring. */
1922 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1923 struct rx_ring *rx_ring,
1924 struct ib_mac_iocb_rsp *ib_mac_rsp,
1927 struct net_device *ndev = qdev->ndev;
1928 struct sk_buff *skb = NULL;
1930 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1932 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1933 if (unlikely(!skb)) {
1934 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1935 "No skb available, drop packet.\n");
1936 rx_ring->rx_dropped++;
1940 /* Frame error, so drop the packet. */
1941 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1942 netif_err(qdev, drv, qdev->ndev,
1943 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1944 dev_kfree_skb_any(skb);
1945 rx_ring->rx_errors++;
	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
1952 if (skb->len > ndev->mtu + ETH_HLEN) {
1953 dev_kfree_skb_any(skb);
1954 rx_ring->rx_dropped++;
1958 /* loopback self test for ethtool */
1959 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1960 ql_check_lb_frame(qdev, skb);
1961 dev_kfree_skb_any(skb);
1965 prefetch(skb->data);
1967 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1968 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1969 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1970 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1971 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1972 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1973 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1974 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1975 rx_ring->rx_multicast++;
1977 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1978 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1979 "Promiscuous Packet.\n");
1982 skb->protocol = eth_type_trans(skb, ndev);
1983 skb->ip_summed = CHECKSUM_NONE;
	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
1988 if (qdev->rx_csum &&
1989 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1991 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1992 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1993 "TCP checksum done!\n");
1994 skb->ip_summed = CHECKSUM_UNNECESSARY;
1995 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1996 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1997 /* Unfragmented ipv4 UDP frame. */
1998 struct iphdr *iph = (struct iphdr *) skb->data;
1999 if (!(iph->frag_off &
2000 cpu_to_be16(IP_MF|IP_OFFSET))) {
2001 skb->ip_summed = CHECKSUM_UNNECESSARY;
2002 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2003 "TCP checksum done!\n");
2008 rx_ring->rx_packets++;
2009 rx_ring->rx_bytes += skb->len;
2010 skb_record_rx_queue(skb, rx_ring->cq_id);
2011 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2013 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2015 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
2018 napi_gro_receive(&rx_ring->napi, skb);
2021 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2023 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
2025 netif_receive_skb(skb);
2029 /* Process an inbound completion from an rx ring. */
2030 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2031 struct rx_ring *rx_ring,
2032 struct ib_mac_iocb_rsp *ib_mac_rsp)
2034 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2035 u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2036 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2037 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2039 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2041 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2042 /* The data and headers are split into
2045 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2047 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2048 /* The data fit in a single small buffer.
2049 * Allocate a new skb, copy the data and
2050 * return the buffer to the free pool.
2052 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2054 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2055 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2056 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2057 /* TCP packet in a page chunk that's been checksummed.
2058 * Tack it on to our GRO skb and let it go.
2060 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2062 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2063 /* Non-TCP packet in a page chunk. Allocate an
2064 * skb, tack it on frags, and send it up.
2066 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2069 struct bq_desc *lbq_desc;
2071 /* Free small buffer that holds the IAL */
2072 lbq_desc = ql_get_curr_sbuf(rx_ring);
2073 netif_err(qdev, rx_err, qdev->ndev,
2074 "Dropping frame, len %d > mtu %d\n",
2075 length, qdev->ndev->mtu);
2077 /* Unwind the large buffers for this frame. */
2078 while (length > 0) {
2079 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
2080 length -= (length < rx_ring->lbq_buf_size) ?
2081 length : rx_ring->lbq_buf_size;
2082 put_page(lbq_desc->p.pg_chunk.page);
2086 return (unsigned long)length;
2089 /* Process an outbound completion from an rx ring. */
2090 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2091 struct ob_mac_iocb_rsp *mac_rsp)
2093 struct tx_ring *tx_ring;
2094 struct tx_ring_desc *tx_ring_desc;
2096 QL_DUMP_OB_MAC_RSP(mac_rsp);
2097 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2098 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2099 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2100 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2101 tx_ring->tx_packets++;
2102 dev_kfree_skb(tx_ring_desc->skb);
2103 tx_ring_desc->skb = NULL;
2105 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2108 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2109 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2110 netif_warn(qdev, tx_done, qdev->ndev,
2111 "Total descriptor length did not match transfer length.\n");
2113 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2114 netif_warn(qdev, tx_done, qdev->ndev,
2115 "Frame too short to be valid, not sent.\n");
2117 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2118 netif_warn(qdev, tx_done, qdev->ndev,
2119 "Frame too long, but sent anyway.\n");
2121 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2122 netif_warn(qdev, tx_done, qdev->ndev,
2123 "PCI backplane error. Frame not sent.\n");
2126 atomic_inc(&tx_ring->tx_count);
2129 /* Fire up a handler to reset the MPI processor. */
2130 void ql_queue_fw_error(struct ql_adapter *qdev)
2133 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2136 void ql_queue_asic_error(struct ql_adapter *qdev)
2139 ql_disable_interrupts(qdev);
	/* Clear adapter up bit to signal the recovery
	 * process that it shouldn't kill the reset worker
	 * thread.
	 */
	clear_bit(QL_ADAPTER_UP, &qdev->flags);
2145 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2148 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2149 struct ib_ae_iocb_rsp *ib_ae_rsp)
2151 switch (ib_ae_rsp->event) {
2152 case MGMT_ERR_EVENT:
2153 netif_err(qdev, rx_err, qdev->ndev,
2154 "Management Processor Fatal Error.\n");
2155 ql_queue_fw_error(qdev);
2158 case CAM_LOOKUP_ERR_EVENT:
2159 netif_err(qdev, link, qdev->ndev,
2160 "Multiple CAM hits lookup occurred.\n");
2161 netif_err(qdev, drv, qdev->ndev,
2162 "This event shouldn't occur.\n");
2163 ql_queue_asic_error(qdev);
2166 case SOFT_ECC_ERROR_EVENT:
2167 netif_err(qdev, rx_err, qdev->ndev,
2168 "Soft ECC error detected.\n");
2169 ql_queue_asic_error(qdev);
2172 case PCI_ERR_ANON_BUF_RD:
2173 netif_err(qdev, rx_err, qdev->ndev,
2174 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
2176 ql_queue_asic_error(qdev);
2180 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2182 ql_queue_asic_error(qdev);
2187 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2189 struct ql_adapter *qdev = rx_ring->qdev;
2190 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2191 struct ob_mac_iocb_rsp *net_rsp = NULL;
2194 struct tx_ring *tx_ring;
2195 /* While there are entries in the completion queue. */
2196 while (prod != rx_ring->cnsmr_idx) {
2198 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2199 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2200 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2202 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2204 switch (net_rsp->opcode) {
2206 case OPCODE_OB_MAC_TSO_IOCB:
2207 case OPCODE_OB_MAC_IOCB:
2208 ql_process_mac_tx_intr(qdev, net_rsp);
2211 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2212 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2216 ql_update_cq(rx_ring);
2217 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2219 ql_write_cq_idx(rx_ring);
2220 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2221 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id) &&
2223 if (atomic_read(&tx_ring->queue_stopped) &&
2224 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
2229 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2235 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2237 struct ql_adapter *qdev = rx_ring->qdev;
2238 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2239 struct ql_net_rsp_iocb *net_rsp;
2242 /* While there are entries in the completion queue. */
2243 while (prod != rx_ring->cnsmr_idx) {
2245 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2246 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2247 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2249 net_rsp = rx_ring->curr_entry;
2251 switch (net_rsp->opcode) {
2252 case OPCODE_IB_MAC_IOCB:
2253 ql_process_mac_rx_intr(qdev, rx_ring,
2254 (struct ib_mac_iocb_rsp *)
2258 case OPCODE_IB_AE_IOCB:
2259 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2263 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2264 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2269 ql_update_cq(rx_ring);
2270 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2271 if (count == budget)
2274 ql_update_buffer_queues(qdev, rx_ring);
2275 ql_write_cq_idx(rx_ring);
2279 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2281 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2282 struct ql_adapter *qdev = rx_ring->qdev;
2283 struct rx_ring *trx_ring;
2284 int i, work_done = 0;
2285 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2287 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2288 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2290 /* Service the TX rings first. They start
2291 * right after the RSS rings. */
2292 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2293 trx_ring = &qdev->rx_ring[i];
2294 /* If this TX completion ring belongs to this vector and
2295 * it's not empty then service it.
2297 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2298 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2299 trx_ring->cnsmr_idx)) {
2300 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2301 "%s: Servicing TX completion ring %d.\n",
2302 __func__, trx_ring->cq_id);
2303 ql_clean_outbound_rx_ring(trx_ring);
2308 * Now service the RSS ring if it's active.
2310 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2311 rx_ring->cnsmr_idx) {
2312 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2313 "%s: Servicing RX completion ring %d.\n",
2314 __func__, rx_ring->cq_id);
2315 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2318 if (work_done < budget) {
2319 napi_complete(napi);
2320 ql_enable_completion_interrupt(qdev, rx_ring->irq);
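/* Note on the NAPI contract above: when fewer than "budget" packets were
 * processed the ring is considered drained, so napi_complete() is called
 * and the completion interrupt for this vector is re-armed.  Returning
 * work_done == budget leaves the interrupt off and lets the core re-poll.
 */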
2325 static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
2327 struct ql_adapter *qdev = netdev_priv(ndev);
2331 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
2332 "Turning on VLAN in NIC_RCV_CFG.\n");
2333 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2334 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2336 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
2337 "Turning off VLAN in NIC_RCV_CFG.\n");
2338 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2342 static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2344 struct ql_adapter *qdev = netdev_priv(ndev);
2345 u32 enable_bit = MAC_ADDR_E;
2348 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2351 if (ql_set_mac_addr_reg
2352 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2353 netif_err(qdev, ifup, qdev->ndev,
2354 "Failed to init vlan address.\n");
2356 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2359 static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2361 struct ql_adapter *qdev = netdev_priv(ndev);
2365 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2369 if (ql_set_mac_addr_reg
2370 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2371 netif_err(qdev, ifup, qdev->ndev,
2372 "Failed to clear vlan address.\n");
2374 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2378 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2379 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2381 struct rx_ring *rx_ring = dev_id;
2382 napi_schedule(&rx_ring->napi);
2386 /* This handles a fatal error, MPI activity, and the default
2387 * rx_ring in an MSI-X multiple vector environment.
2388 * In an MSI/Legacy environment it also processes the rest of
2391 static irqreturn_t qlge_isr(int irq, void *dev_id)
2393 struct rx_ring *rx_ring = dev_id;
2394 struct ql_adapter *qdev = rx_ring->qdev;
2395 struct intr_context *intr_context = &qdev->intr_context[0];
2399 spin_lock(&qdev->hw_lock);
2400 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2401 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2402 "Shared Interrupt, Not ours!\n");
2403 spin_unlock(&qdev->hw_lock);
2406 spin_unlock(&qdev->hw_lock);
2408 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2411 * Check for fatal error.
2414 ql_queue_asic_error(qdev);
2415 netif_err(qdev, intr, qdev->ndev,
2416 "Got fatal error, STS = %x.\n", var);
2417 var = ql_read32(qdev, ERR_STS);
2418 netif_err(qdev, intr, qdev->ndev,
2419 "Resetting chip. Error Status Register = 0x%x\n", var);
2424 * Check MPI processor activity.
2426 if ((var & STS_PI) &&
2427 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2429 * We've got an async event or mailbox completion.
2430 * Handle it and clear the source of the interrupt.
2432 netif_err(qdev, intr, qdev->ndev,
2433 "Got MPI processor interrupt.\n");
2434 ql_disable_completion_interrupt(qdev, intr_context->intr);
2435 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2436 queue_delayed_work_on(smp_processor_id(),
2437 qdev->workqueue, &qdev->mpi_work, 0);
2442 * Get the bit-mask that shows the active queues for this
2443 * pass. Compare it to the queues that this irq services
2444 * and call napi if there's a match.
2446 var = ql_read32(qdev, ISR1);
2447 if (var & intr_context->irq_mask) {
2448 netif_info(qdev, intr, qdev->ndev,
2449 "Waking handler for rx_ring[0].\n");
2450 ql_disable_completion_interrupt(qdev, intr_context->intr);
2451 napi_schedule(&rx_ring->napi);
2454 ql_enable_completion_interrupt(qdev, intr_context->intr);
2455 return work_done ? IRQ_HANDLED : IRQ_NONE;
2458 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2461 if (skb_is_gso(skb)) {
2463 if (skb_header_cloned(skb)) {
2464 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2469 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2470 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2471 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2472 mac_iocb_ptr->total_hdrs_len =
2473 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2474 mac_iocb_ptr->net_trans_offset =
2475 cpu_to_le16(skb_network_offset(skb) |
2476 skb_transport_offset(skb)
2477 << OB_MAC_TRANSPORT_HDR_SHIFT);
2478 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2479 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
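/* The TCP checksum is seeded below with the pseudo-header checksum
 * (addresses and protocol, no length) so the MAC can compute and insert
 * the final checksum for each generated segment, per the usual LSO
 * convention.
 */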
2480 if (likely(skb->protocol == htons(ETH_P_IP))) {
2481 struct iphdr *iph = ip_hdr(skb);
2483 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2484 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2488 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2489 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2490 tcp_hdr(skb)->check =
2491 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2492 &ipv6_hdr(skb)->daddr,
2500 static void ql_hw_csum_setup(struct sk_buff *skb,
2501 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2504 struct iphdr *iph = ip_hdr(skb);
2506 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2507 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2508 mac_iocb_ptr->net_trans_offset =
2509 cpu_to_le16(skb_network_offset(skb) |
2510 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2512 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2513 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2514 if (likely(iph->protocol == IPPROTO_TCP)) {
2515 check = &(tcp_hdr(skb)->check);
2516 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2517 mac_iocb_ptr->total_hdrs_len =
2518 cpu_to_le16(skb_transport_offset(skb) +
2519 (tcp_hdr(skb)->doff << 2));
2521 check = &(udp_hdr(skb)->check);
2522 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2523 mac_iocb_ptr->total_hdrs_len =
2524 cpu_to_le16(skb_transport_offset(skb) +
2525 sizeof(struct udphdr));
2527 *check = ~csum_tcpudp_magic(iph->saddr,
2528 iph->daddr, len, iph->protocol, 0);
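/* Worked example (hypothetical frame, for illustration only): for a plain
 * Ethernet + IPv4 + TCP packet, skb_network_offset() is 14 and
 * skb_transport_offset() is 34, so net_trans_offset above encodes
 * 14 | (34 << OB_MAC_TRANSPORT_HDR_SHIFT), and the seeded pseudo-header
 * checksum lets the hardware finish the TCP/UDP checksum on transmit.
 */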
2531 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2533 struct tx_ring_desc *tx_ring_desc;
2534 struct ob_mac_iocb_req *mac_iocb_ptr;
2535 struct ql_adapter *qdev = netdev_priv(ndev);
2537 struct tx_ring *tx_ring;
2538 u32 tx_ring_idx = (u32) skb->queue_mapping;
2540 tx_ring = &qdev->tx_ring[tx_ring_idx];
2542 if (skb_padto(skb, ETH_ZLEN))
2543 return NETDEV_TX_OK;
2545 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2546 netif_info(qdev, tx_queued, qdev->ndev,
2547 "%s: shutting down tx queue %d du to lack of resources.\n",
2548 __func__, tx_ring_idx);
2549 netif_stop_subqueue(ndev, tx_ring->wq_id);
2550 atomic_inc(&tx_ring->queue_stopped);
2551 tx_ring->tx_errors++;
2552 return NETDEV_TX_BUSY;
2554 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2555 mac_iocb_ptr = tx_ring_desc->queue_entry;
2556 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2558 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2559 mac_iocb_ptr->tid = tx_ring_desc->index;
2560 /* We use the upper 32-bits to store the tx queue for this IO.
2561 * When we get the completion we can use it to establish the context.
2563 mac_iocb_ptr->txq_idx = tx_ring_idx;
2564 tx_ring_desc->skb = skb;
2566 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2568 if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
2569 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2570 "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
2571 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2572 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2574 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2576 dev_kfree_skb_any(skb);
2577 return NETDEV_TX_OK;
2578 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2579 ql_hw_csum_setup(skb,
2580 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2582 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2584 netif_err(qdev, tx_queued, qdev->ndev,
2585 "Could not map the segments.\n");
2586 tx_ring->tx_errors++;
2587 return NETDEV_TX_BUSY;
2589 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2590 tx_ring->prod_idx++;
2591 if (tx_ring->prod_idx == tx_ring->wq_len)
2592 tx_ring->prod_idx = 0;
2595 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2596 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2597 "tx queued, slot %d, len %d\n",
2598 tx_ring->prod_idx, skb->len);
2600 atomic_dec(&tx_ring->tx_count);
2601 return NETDEV_TX_OK;
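/* Transmit path summary (descriptive only): qlge_send() reserves a
 * descriptor, builds the outbound MAC IOCB (optionally as a TSO IOCB or
 * with checksum-offload flags), maps the skb, advances the producer index
 * with wrap at wq_len, and finally rings the doorbell so the chip starts
 * DMA on the new entry.
 */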
2605 static void ql_free_shadow_space(struct ql_adapter *qdev)
2607 if (qdev->rx_ring_shadow_reg_area) {
2608 pci_free_consistent(qdev->pdev,
2610 qdev->rx_ring_shadow_reg_area,
2611 qdev->rx_ring_shadow_reg_dma);
2612 qdev->rx_ring_shadow_reg_area = NULL;
2614 if (qdev->tx_ring_shadow_reg_area) {
2615 pci_free_consistent(qdev->pdev,
2617 qdev->tx_ring_shadow_reg_area,
2618 qdev->tx_ring_shadow_reg_dma);
2619 qdev->tx_ring_shadow_reg_area = NULL;
2623 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2625 qdev->rx_ring_shadow_reg_area =
2626 pci_alloc_consistent(qdev->pdev,
2627 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2628 if (qdev->rx_ring_shadow_reg_area == NULL) {
2629 netif_err(qdev, ifup, qdev->ndev,
2630 "Allocation of RX shadow space failed.\n");
2633 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2634 qdev->tx_ring_shadow_reg_area =
2635 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2636 &qdev->tx_ring_shadow_reg_dma);
2637 if (qdev->tx_ring_shadow_reg_area == NULL) {
2638 netif_err(qdev, ifup, qdev->ndev,
2639 "Allocation of TX shadow space failed.\n");
2640 goto err_wqp_sh_area;
2642 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2646 pci_free_consistent(qdev->pdev,
2648 qdev->rx_ring_shadow_reg_area,
2649 qdev->rx_ring_shadow_reg_dma);
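/* Background note: the "shadow" areas allocated above are coherent-DMA
 * pages that the chip updates with producer/consumer indices, so the
 * driver can read ring state from host memory instead of issuing an MMIO
 * register read per completion.
 */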
2653 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2655 struct tx_ring_desc *tx_ring_desc;
2657 struct ob_mac_iocb_req *mac_iocb_ptr;
2659 mac_iocb_ptr = tx_ring->wq_base;
2660 tx_ring_desc = tx_ring->q;
2661 for (i = 0; i < tx_ring->wq_len; i++) {
2662 tx_ring_desc->index = i;
2663 tx_ring_desc->skb = NULL;
2664 tx_ring_desc->queue_entry = mac_iocb_ptr;
2668 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2669 atomic_set(&tx_ring->queue_stopped, 0);
2672 static void ql_free_tx_resources(struct ql_adapter *qdev,
2673 struct tx_ring *tx_ring)
2675 if (tx_ring->wq_base) {
2676 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2677 tx_ring->wq_base, tx_ring->wq_base_dma);
2678 tx_ring->wq_base = NULL;
2684 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2685 struct tx_ring *tx_ring)
2688 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2689 &tx_ring->wq_base_dma);
2691 if ((tx_ring->wq_base == NULL) ||
2692 tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
2693 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2697 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2698 if (tx_ring->q == NULL)
2703 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2704 tx_ring->wq_base, tx_ring->wq_base_dma);
2708 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2710 struct bq_desc *lbq_desc;
2712 uint32_t curr_idx, clean_idx;
2714 curr_idx = rx_ring->lbq_curr_idx;
2715 clean_idx = rx_ring->lbq_clean_idx;
2716 while (curr_idx != clean_idx) {
2717 lbq_desc = &rx_ring->lbq[curr_idx];
2719 if (lbq_desc->p.pg_chunk.last_flag) {
2720 pci_unmap_page(qdev->pdev,
2721 lbq_desc->p.pg_chunk.map,
2722 ql_lbq_block_size(qdev),
2723 PCI_DMA_FROMDEVICE);
2724 lbq_desc->p.pg_chunk.last_flag = 0;
2727 put_page(lbq_desc->p.pg_chunk.page);
2728 lbq_desc->p.pg_chunk.page = NULL;
2730 if (++curr_idx == rx_ring->lbq_len)
2736 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2739 struct bq_desc *sbq_desc;
2741 for (i = 0; i < rx_ring->sbq_len; i++) {
2742 sbq_desc = &rx_ring->sbq[i];
2743 if (sbq_desc == NULL) {
2744 netif_err(qdev, ifup, qdev->ndev,
2745 "sbq_desc %d is NULL.\n", i);
2748 if (sbq_desc->p.skb) {
2749 pci_unmap_single(qdev->pdev,
2750 pci_unmap_addr(sbq_desc, mapaddr),
2751 pci_unmap_len(sbq_desc, maplen),
2752 PCI_DMA_FROMDEVICE);
2753 dev_kfree_skb(sbq_desc->p.skb);
2754 sbq_desc->p.skb = NULL;
2759 /* Free all large and small rx buffers associated
2760 * with the completion queues for this device.
2762 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2765 struct rx_ring *rx_ring;
2767 for (i = 0; i < qdev->rx_ring_count; i++) {
2768 rx_ring = &qdev->rx_ring[i];
2770 ql_free_lbq_buffers(qdev, rx_ring);
2772 ql_free_sbq_buffers(qdev, rx_ring);
2776 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2778 struct rx_ring *rx_ring;
2781 for (i = 0; i < qdev->rx_ring_count; i++) {
2782 rx_ring = &qdev->rx_ring[i];
2783 if (rx_ring->type != TX_Q)
2784 ql_update_buffer_queues(qdev, rx_ring);
2788 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2789 struct rx_ring *rx_ring)
2792 struct bq_desc *lbq_desc;
2793 __le64 *bq = rx_ring->lbq_base;
2795 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2796 for (i = 0; i < rx_ring->lbq_len; i++) {
2797 lbq_desc = &rx_ring->lbq[i];
2798 memset(lbq_desc, 0, sizeof(*lbq_desc));
2799 lbq_desc->index = i;
2800 lbq_desc->addr = bq;
2805 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2806 struct rx_ring *rx_ring)
2809 struct bq_desc *sbq_desc;
2810 __le64 *bq = rx_ring->sbq_base;
2812 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2813 for (i = 0; i < rx_ring->sbq_len; i++) {
2814 sbq_desc = &rx_ring->sbq[i];
2815 memset(sbq_desc, 0, sizeof(*sbq_desc));
2816 sbq_desc->index = i;
2817 sbq_desc->addr = bq;
2822 static void ql_free_rx_resources(struct ql_adapter *qdev,
2823 struct rx_ring *rx_ring)
2825 /* Free the small buffer queue. */
2826 if (rx_ring->sbq_base) {
2827 pci_free_consistent(qdev->pdev,
2829 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2830 rx_ring->sbq_base = NULL;
2833 /* Free the small buffer queue control blocks. */
2834 kfree(rx_ring->sbq);
2835 rx_ring->sbq = NULL;
2837 /* Free the large buffer queue. */
2838 if (rx_ring->lbq_base) {
2839 pci_free_consistent(qdev->pdev,
2841 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2842 rx_ring->lbq_base = NULL;
2845 /* Free the large buffer queue control blocks. */
2846 kfree(rx_ring->lbq);
2847 rx_ring->lbq = NULL;
2849 /* Free the rx queue. */
2850 if (rx_ring->cq_base) {
2851 pci_free_consistent(qdev->pdev,
2853 rx_ring->cq_base, rx_ring->cq_base_dma);
2854 rx_ring->cq_base = NULL;
2858 /* Allocate queues and buffers for this completion queue based
2859 * on the values in the parameter structure. */
2860 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2861 struct rx_ring *rx_ring)
2865 * Allocate the completion queue for this rx_ring.
2868 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2869 &rx_ring->cq_base_dma);
2871 if (rx_ring->cq_base == NULL) {
2872 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2876 if (rx_ring->sbq_len) {
2878 * Allocate small buffer queue.
2881 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2882 &rx_ring->sbq_base_dma);
2884 if (rx_ring->sbq_base == NULL) {
2885 netif_err(qdev, ifup, qdev->ndev,
2886 "Small buffer queue allocation failed.\n");
2891 * Allocate small buffer queue control blocks.
2894 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2896 if (rx_ring->sbq == NULL) {
2897 netif_err(qdev, ifup, qdev->ndev,
2898 "Small buffer queue control block allocation failed.\n");
2902 ql_init_sbq_ring(qdev, rx_ring);
2905 if (rx_ring->lbq_len) {
2907 * Allocate large buffer queue.
2910 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2911 &rx_ring->lbq_base_dma);
2913 if (rx_ring->lbq_base == NULL) {
2914 netif_err(qdev, ifup, qdev->ndev,
2915 "Large buffer queue allocation failed.\n");
2919 * Allocate large buffer queue control blocks.
2922 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2924 if (rx_ring->lbq == NULL) {
2925 netif_err(qdev, ifup, qdev->ndev,
2926 "Large buffer queue control block allocation failed.\n");
2930 ql_init_lbq_ring(qdev, rx_ring);
2936 ql_free_rx_resources(qdev, rx_ring);
2940 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2942 struct tx_ring *tx_ring;
2943 struct tx_ring_desc *tx_ring_desc;
2947 * Loop through all queues and free
2950 for (j = 0; j < qdev->tx_ring_count; j++) {
2951 tx_ring = &qdev->tx_ring[j];
2952 for (i = 0; i < tx_ring->wq_len; i++) {
2953 tx_ring_desc = &tx_ring->q[i];
2954 if (tx_ring_desc && tx_ring_desc->skb) {
2955 netif_err(qdev, ifdown, qdev->ndev,
2956 "Freeing lost SKB %p, from queue %d, index %d.\n",
2957 tx_ring_desc->skb, j,
2958 tx_ring_desc->index);
2959 ql_unmap_send(qdev, tx_ring_desc,
2960 tx_ring_desc->map_cnt);
2961 dev_kfree_skb(tx_ring_desc->skb);
2962 tx_ring_desc->skb = NULL;
2968 static void ql_free_mem_resources(struct ql_adapter *qdev)
2972 for (i = 0; i < qdev->tx_ring_count; i++)
2973 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2974 for (i = 0; i < qdev->rx_ring_count; i++)
2975 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2976 ql_free_shadow_space(qdev);
2979 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2983 /* Allocate space for our shadow registers and such. */
2984 if (ql_alloc_shadow_space(qdev))
2987 for (i = 0; i < qdev->rx_ring_count; i++) {
2988 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2989 netif_err(qdev, ifup, qdev->ndev,
2990 "RX resource allocation failed.\n");
2994 /* Allocate tx queue resources */
2995 for (i = 0; i < qdev->tx_ring_count; i++) {
2996 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2997 netif_err(qdev, ifup, qdev->ndev,
2998 "TX resource allocation failed.\n");
3005 ql_free_mem_resources(qdev);
3009 /* Set up the rx ring control block and pass it to the chip.
3010 * The control block is defined as
3011 * "Completion Queue Initialization Control Block", or cqicb.
3013 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3015 struct cqicb *cqicb = &rx_ring->cqicb;
3016 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3017 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3018 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3019 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3020 void __iomem *doorbell_area =
3021 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3025 __le64 *base_indirect_ptr;
3028 /* Set up the shadow registers for this ring. */
3029 rx_ring->prod_idx_sh_reg = shadow_reg;
3030 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3031 *rx_ring->prod_idx_sh_reg = 0;
3032 shadow_reg += sizeof(u64);
3033 shadow_reg_dma += sizeof(u64);
3034 rx_ring->lbq_base_indirect = shadow_reg;
3035 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3036 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3037 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3038 rx_ring->sbq_base_indirect = shadow_reg;
3039 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
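/* Per-CQ shadow layout as set up above: 8 bytes for the producer index,
 * followed by the large-buffer-queue indirection pointers and then the
 * small-buffer-queue indirection pointers, all carved out of the single
 * RX_RING_SHADOW_SPACE slice for this cq_id.
 */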
3041 /* PCI doorbell mem area + 0x00 for consumer index register */
3042 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3043 rx_ring->cnsmr_idx = 0;
3044 rx_ring->curr_entry = rx_ring->cq_base;
3046 /* PCI doorbell mem area + 0x04 for valid register */
3047 rx_ring->valid_db_reg = doorbell_area + 0x04;
3049 /* PCI doorbell mem area + 0x18 for large buffer consumer */
3050 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3052 /* PCI doorbell mem area + 0x1c */
3053 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3055 memset((void *)cqicb, 0, sizeof(struct cqicb));
3056 cqicb->msix_vect = rx_ring->irq;
3058 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3059 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3061 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3063 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3066 * Set up the control block load flags.
3068 cqicb->flags = FLAGS_LC | /* Load queue base address */
3069 FLAGS_LV | /* Load MSI-X vector */
3070 FLAGS_LI; /* Load irq delay values */
3071 if (rx_ring->lbq_len) {
3072 cqicb->flags |= FLAGS_LL; /* Load lbq values */
3073 tmp = (u64)rx_ring->lbq_base_dma;
3074 base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
3077 *base_indirect_ptr = cpu_to_le64(tmp);
3078 tmp += DB_PAGE_SIZE;
3079 base_indirect_ptr++;
3081 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3083 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3084 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3085 (u16) rx_ring->lbq_buf_size;
3086 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3087 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3088 (u16) rx_ring->lbq_len;
3089 cqicb->lbq_len = cpu_to_le16(bq_len);
3090 rx_ring->lbq_prod_idx = 0;
3091 rx_ring->lbq_curr_idx = 0;
3092 rx_ring->lbq_clean_idx = 0;
3093 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3095 if (rx_ring->sbq_len) {
3096 cqicb->flags |= FLAGS_LS; /* Load sbq values */
3097 tmp = (u64)rx_ring->sbq_base_dma;
3098 base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
3101 *base_indirect_ptr = cpu_to_le64(tmp);
3102 tmp += DB_PAGE_SIZE;
3103 base_indirect_ptr++;
3105 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3107 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3108 cqicb->sbq_buf_size =
3109 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3110 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3111 (u16) rx_ring->sbq_len;
3112 cqicb->sbq_len = cpu_to_le16(bq_len);
3113 rx_ring->sbq_prod_idx = 0;
3114 rx_ring->sbq_curr_idx = 0;
3115 rx_ring->sbq_clean_idx = 0;
3116 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3118 switch (rx_ring->type) {
3120 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3121 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3124 /* Inbound completion handling rx_rings run in
3125 * separate NAPI contexts.
3127 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3129 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3130 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3133 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3134 "Invalid rx_ring->type = %d.\n", rx_ring->type);
3136 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3137 "Initializing rx work queue.\n");
3138 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3139 CFG_LCQ, rx_ring->cq_id);
3141 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3147 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3149 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3150 void __iomem *doorbell_area =
3151 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3152 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3153 (tx_ring->wq_id * sizeof(u64));
3154 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3155 (tx_ring->wq_id * sizeof(u64));
3159 * Assign doorbell registers for this tx_ring.
3161 /* TX PCI doorbell mem area for tx producer index */
3162 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3163 tx_ring->prod_idx = 0;
3164 /* TX PCI doorbell mem area + 0x04 */
3165 tx_ring->valid_db_reg = doorbell_area + 0x04;
3168 * Assign shadow registers for this tx_ring.
3170 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3171 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3173 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3174 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3175 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3176 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3178 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3180 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3182 ql_init_tx_ring(qdev, tx_ring);
3184 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3185 (u16) tx_ring->wq_id);
3187 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3190 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3191 "Successfully loaded WQICB.\n");
3195 static void ql_disable_msix(struct ql_adapter *qdev)
3197 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3198 pci_disable_msix(qdev->pdev);
3199 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3200 kfree(qdev->msi_x_entry);
3201 qdev->msi_x_entry = NULL;
3202 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3203 pci_disable_msi(qdev->pdev);
3204 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3208 /* We start by trying to get the number of vectors
3209 * stored in qdev->intr_count. If we don't get that
3210 * many then we reduce the count and try again.
3212 static void ql_enable_msix(struct ql_adapter *qdev)
3216 /* Get the MSIX vectors. */
3217 if (qlge_irq_type == MSIX_IRQ) {
3218 /* Try to alloc space for the msix struct,
3219 * if it fails then go to MSI/legacy.
3221 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3222 sizeof(struct msix_entry),
3224 if (!qdev->msi_x_entry) {
3225 qlge_irq_type = MSI_IRQ;
3229 for (i = 0; i < qdev->intr_count; i++)
3230 qdev->msi_x_entry[i].entry = i;
3232 /* Loop to get our vectors. We start with
3233 * what we want and settle for what we get.
3236 err = pci_enable_msix(qdev->pdev,
3237 qdev->msi_x_entry, qdev->intr_count);
3239 qdev->intr_count = err;
3243 kfree(qdev->msi_x_entry);
3244 qdev->msi_x_entry = NULL;
3245 netif_warn(qdev, ifup, qdev->ndev,
3246 "MSI-X Enable failed, trying MSI.\n");
3247 qdev->intr_count = 1;
3248 qlge_irq_type = MSI_IRQ;
3249 } else if (err == 0) {
3250 set_bit(QL_MSIX_ENABLED, &qdev->flags);
3251 netif_info(qdev, ifup, qdev->ndev,
3252 "MSI-X Enabled, got %d vectors.\n",
3258 qdev->intr_count = 1;
3259 if (qlge_irq_type == MSI_IRQ) {
3260 if (!pci_enable_msi(qdev->pdev)) {
3261 set_bit(QL_MSI_ENABLED, &qdev->flags);
3262 netif_info(qdev, ifup, qdev->ndev,
3263 "Running with MSI interrupts.\n");
3267 qlge_irq_type = LEG_IRQ;
3268 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3269 "Running with legacy interrupts.\n");
3272 /* Each vector services 1 RSS ring and 1 or more
3273 * TX completion rings. This function loops through
3274 * the TX completion rings and assigns the vector that
3275 * will service it. An example would be if there are
3276 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3277 * This would mean that vector 0 would service RSS ring 0
3278 * and TX completion rings 0,1,2 and 3. Vector 1 would
3279 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3281 static void ql_set_tx_vect(struct ql_adapter *qdev)
3284 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3286 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3287 /* Assign irq vectors to TX rx_rings.*/
3288 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3289 i < qdev->rx_ring_count; i++) {
3290 if (j == tx_rings_per_vector) {
3294 qdev->rx_ring[i].irq = vect;
3298 /* For single vector all rings have an irq
3301 for (i = 0; i < qdev->rx_ring_count; i++)
3302 qdev->rx_ring[i].irq = 0;
3306 /* Set the interrupt mask for this vector. Each vector
3307 * will service 1 RSS ring and 1 or more TX completion
3308 * rings. This function sets up a bit mask per vector
3309 * that indicates which rings it services.
3311 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3313 int j, vect = ctx->intr;
3314 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3316 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3317 /* Add the RSS ring serviced by this vector
3320 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3321 /* Add the TX ring(s) serviced by this vector
3323 for (j = 0; j < tx_rings_per_vector; j++) {
3325 (1 << qdev->rx_ring[qdev->rss_ring_count +
3326 (vect * tx_rings_per_vector) + j].cq_id);
3329 /* For single vector we just shift each queue's
3332 for (j = 0; j < qdev->rx_ring_count; j++)
3333 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
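/* Worked example (numbers hypothetical, for illustration): with 2 MSI-X
 * vectors and 8 TX completion rings, tx_rings_per_vector is 4.  Vector 1
 * then covers RSS ring cq_id 1 plus the TX completion rings with cq_ids
 * 6-9, giving
 * ctx->irq_mask = (1 << 1) | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 9).
 */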
3338 * Here we build the intr_context structures based on
3339 * our rx_ring count and intr vector count.
3340 * The intr_context structure is used to hook each vector
3341 * to possibly different handlers.
3343 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3346 struct intr_context *intr_context = &qdev->intr_context[0];
3348 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3349 /* Each rx_ring has its
3350 * own intr_context since we have separate
3351 * vectors for each queue.
3353 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3354 qdev->rx_ring[i].irq = i;
3355 intr_context->intr = i;
3356 intr_context->qdev = qdev;
3357 /* Set up this vector's bit-mask that indicates
3358 * which queues it services.
3360 ql_set_irq_mask(qdev, intr_context);
3362 * We set up each vector's enable/disable/read bits so
3363 * there's no bit/mask calculations in the critical path.
3365 intr_context->intr_en_mask =
3366 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3367 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3369 intr_context->intr_dis_mask =
3370 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3371 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3373 intr_context->intr_read_mask =
3374 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3375 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3378 /* The first vector/queue handles
3379 * broadcast/multicast, fatal errors,
3380 * and firmware events. This in addition
3381 * to normal inbound NAPI processing.
3383 intr_context->handler = qlge_isr;
3384 sprintf(intr_context->name, "%s-rx-%d",
3385 qdev->ndev->name, i);
3388 * Inbound queues handle unicast frames only.
3390 intr_context->handler = qlge_msix_rx_isr;
3391 sprintf(intr_context->name, "%s-rx-%d",
3392 qdev->ndev->name, i);
3397 * All rx_rings use the same intr_context since
3398 * there is only one vector.
3400 intr_context->intr = 0;
3401 intr_context->qdev = qdev;
3403 * We set up each vector's enable/disable/read bits so
3404 * there's no bit/mask calculations in the critical path.
3406 intr_context->intr_en_mask =
3407 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3408 intr_context->intr_dis_mask =
3409 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3410 INTR_EN_TYPE_DISABLE;
3411 intr_context->intr_read_mask =
3412 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3414 * Single interrupt means one handler for all rings.
3416 intr_context->handler = qlge_isr;
3417 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3418 /* Set up this vector's bit-mask that indicates
3419 * which queues it services. In this case there is
3420 * a single vector so it will service all RSS and
3421 * TX completion rings.
3423 ql_set_irq_mask(qdev, intr_context);
3425 /* Tell the TX completion rings which MSIx vector
3426 * they will be using.
3428 ql_set_tx_vect(qdev);
3431 static void ql_free_irq(struct ql_adapter *qdev)
3434 struct intr_context *intr_context = &qdev->intr_context[0];
3436 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3437 if (intr_context->hooked) {
3438 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3439 free_irq(qdev->msi_x_entry[i].vector,
3441 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3442 "freeing msix interrupt %d.\n", i);
3444 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3445 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3446 "freeing msi interrupt %d.\n", i);
3450 ql_disable_msix(qdev);
3453 static int ql_request_irq(struct ql_adapter *qdev)
3457 struct pci_dev *pdev = qdev->pdev;
3458 struct intr_context *intr_context = &qdev->intr_context[0];
3460 ql_resolve_queues_to_irqs(qdev);
3462 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3463 atomic_set(&intr_context->irq_cnt, 0);
3464 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3465 status = request_irq(qdev->msi_x_entry[i].vector,
3466 intr_context->handler,
3471 netif_err(qdev, ifup, qdev->ndev,
3472 "Failed request for MSIX interrupt %d.\n",
3476 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3477 "Hooked intr %d, queue type %s, with name %s.\n",
3479 qdev->rx_ring[i].type == DEFAULT_Q ?
3481 qdev->rx_ring[i].type == TX_Q ?
3483 qdev->rx_ring[i].type == RX_Q ?
3485 intr_context->name);
3488 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3489 "trying msi or legacy interrupts.\n");
3490 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3491 "%s: irq = %d.\n", __func__, pdev->irq);
3492 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3493 "%s: context->name = %s.\n", __func__,
3494 intr_context->name);
3495 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3496 "%s: dev_id = 0x%p.\n", __func__,
3499 request_irq(pdev->irq, qlge_isr,
3500 test_bit(QL_MSI_ENABLED,
3502 flags) ? 0 : IRQF_SHARED,
3503 intr_context->name, &qdev->rx_ring[0]);
3507 netif_err(qdev, ifup, qdev->ndev,
3508 "Hooked intr %d, queue type %s, with name %s.\n",
3510 qdev->rx_ring[0].type == DEFAULT_Q ?
3512 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3513 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3514 intr_context->name);
3516 intr_context->hooked = 1;
3520 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3525 static int ql_start_rss(struct ql_adapter *qdev)
3527 u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3528 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
3529 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
3530 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
3531 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
3532 0xbe, 0xac, 0x01, 0xfa};
3533 struct ricb *ricb = &qdev->ricb;
3536 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3538 memset((void *)ricb, 0, sizeof(*ricb));
3540 ricb->base_cq = RSS_L4K;
3542 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3543 ricb->mask = cpu_to_le16((u16)(0x3ff));
3546 * Fill out the Indirection Table.
3548 for (i = 0; i < 1024; i++)
3549 hash_id[i] = (i & (qdev->rss_ring_count - 1));
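/* Illustrative example (hypothetical count): with 4 RSS rings the 1024
 * indirection entries filled above simply cycle 0, 1, 2, 3, 0, 1, ... so
 * the hardware hash spreads inbound flows evenly across the RSS
 * completion queues.
 */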
3551 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3552 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3554 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
3556 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3558 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3561 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3562 "Successfully loaded RICB.\n");
3566 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3570 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3573 /* Clear all the entries in the routing table. */
3574 for (i = 0; i < 16; i++) {
3575 status = ql_set_routing_reg(qdev, i, 0, 0);
3577 netif_err(qdev, ifup, qdev->ndev,
3578 "Failed to init routing register for CAM packets.\n");
3582 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3586 /* Initialize the frame-to-queue routing. */
3587 static int ql_route_initialize(struct ql_adapter *qdev)
3591 /* Clear all the entries in the routing table. */
3592 status = ql_clear_routing_entries(qdev);
3596 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3600 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
3602 netif_err(qdev, ifup, qdev->ndev,
3603 "Failed to init routing register for error packets.\n");
3606 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3608 netif_err(qdev, ifup, qdev->ndev,
3609 "Failed to init routing register for broadcast packets.\n");
3612 /* If we have more than one inbound queue, then turn on RSS in the
3615 if (qdev->rss_ring_count > 1) {
3616 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3617 RT_IDX_RSS_MATCH, 1);
3619 netif_err(qdev, ifup, qdev->ndev,
3620 "Failed to init routing register for MATCH RSS packets.\n");
3625 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3628 netif_err(qdev, ifup, qdev->ndev,
3629 "Failed to init routing register for CAM packets.\n");
3631 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3635 int ql_cam_route_initialize(struct ql_adapter *qdev)
3639 /* Check if the link is up and use that to
3640 * determine whether we are setting or clearing
3641 * the MAC address in the CAM.
3643 set = ql_read32(qdev, STS);
3644 set &= qdev->port_link_up;
3645 status = ql_set_mac_addr(qdev, set);
3647 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3651 status = ql_route_initialize(qdev);
3653 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3658 static int ql_adapter_initialize(struct ql_adapter *qdev)
3665 * Set up the System register to halt on errors.
3667 value = SYS_EFE | SYS_FAE;
3669 ql_write32(qdev, SYS, mask | value);
3671 /* Set the default queue, and VLAN behavior. */
3672 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3673 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
3674 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3676 /* Set the MPI interrupt to enabled. */
3677 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3679 /* Enable the function, set pagesize, enable error checking. */
3680 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3681 FSC_EC | FSC_VM_PAGE_4K;
3682 value |= SPLT_SETTING;
3684 /* Set/clear header splitting. */
3685 mask = FSC_VM_PAGESIZE_MASK |
3686 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3687 ql_write32(qdev, FSC, mask | value);
3689 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3691 /* Set RX packet routing to use port/pci function on which the
3692 * packet arrived on in addition to usual frame routing.
3693 * This is helpful when bonding, where both interfaces can have
3694 * the same MAC address.
3696 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3697 /* Reroute all packets to our Interface.
3698 * They may have been routed to MPI firmware
3701 value = ql_read32(qdev, MGMT_RCV_CFG);
3702 value &= ~MGMT_RCV_CFG_RM;
3705 /* Sticky reg needs clearing due to WOL. */
3706 ql_write32(qdev, MGMT_RCV_CFG, mask);
3707 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3709 /* Default WOL is enabled on Mezz cards */
3710 if (qdev->pdev->subsystem_device == 0x0068 ||
3711 qdev->pdev->subsystem_device == 0x0180)
3712 qdev->wol = WAKE_MAGIC;
3714 /* Start up the rx queues. */
3715 for (i = 0; i < qdev->rx_ring_count; i++) {
3716 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3718 netif_err(qdev, ifup, qdev->ndev,
3719 "Failed to start rx ring[%d].\n", i);
3724 /* If there is more than one inbound completion queue
3725 * then download a RICB to configure RSS.
3727 if (qdev->rss_ring_count > 1) {
3728 status = ql_start_rss(qdev);
3730 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3735 /* Start up the tx queues. */
3736 for (i = 0; i < qdev->tx_ring_count; i++) {
3737 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3739 netif_err(qdev, ifup, qdev->ndev,
3740 "Failed to start tx ring[%d].\n", i);
3745 /* Initialize the port and set the max framesize. */
3746 status = qdev->nic_ops->port_initialize(qdev);
3748 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3750 /* Set up the MAC address and frame routing filter. */
3751 status = ql_cam_route_initialize(qdev);
3753 netif_err(qdev, ifup, qdev->ndev,
3754 "Failed to init CAM/Routing tables.\n");
3758 /* Start NAPI for the RSS queues. */
3759 for (i = 0; i < qdev->rss_ring_count; i++) {
3760 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3761 "Enabling NAPI for rx_ring[%d].\n", i);
3762 napi_enable(&qdev->rx_ring[i].napi);
3768 /* Issue soft reset to chip. */
3769 static int ql_adapter_reset(struct ql_adapter *qdev)
3773 unsigned long end_jiffies;
3775 /* Clear all the entries in the routing table. */
3776 status = ql_clear_routing_entries(qdev);
3778 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3782 end_jiffies = jiffies +
3783 max((unsigned long)1, usecs_to_jiffies(30));
3785 /* Stop management traffic. */
3786 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3788 /* Wait for the NIC and MGMNT FIFOs to empty. */
3789 ql_wait_fifo_empty(qdev);
3791 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3794 value = ql_read32(qdev, RST_FO);
3795 if ((value & RST_FO_FR) == 0)
3798 } while (time_before(jiffies, end_jiffies));
3800 if (value & RST_FO_FR) {
3801 netif_err(qdev, ifdown, qdev->ndev,
3802 "ETIMEDOUT!!! errored out of resetting the chip!\n");
3803 status = -ETIMEDOUT;
3806 /* Resume management traffic. */
3807 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
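/* Reset sequence summary (descriptive only): clear the routing table, ask
 * the MPI firmware to stop management traffic, wait for the NIC and MGMNT
 * FIFOs to drain, issue the function reset through RST_FO, poll up to the
 * timeout for RST_FO_FR to clear, then resume management traffic.
 */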
3811 static void ql_display_dev_info(struct net_device *ndev)
3813 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3815 netif_info(qdev, probe, qdev->ndev,
3816 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3817 "XG Roll = %d, XG Rev = %d.\n",
3820 qdev->chip_rev_id & 0x0000000f,
3821 qdev->chip_rev_id >> 4 & 0x0000000f,
3822 qdev->chip_rev_id >> 8 & 0x0000000f,
3823 qdev->chip_rev_id >> 12 & 0x0000000f);
3824 netif_info(qdev, probe, qdev->ndev,
3825 "MAC address %pM\n", ndev->dev_addr);
3828 int ql_wol(struct ql_adapter *qdev)
3831 u32 wol = MB_WOL_DISABLE;
3833 /* The CAM is still intact after a reset, but if we
3834 * are doing WOL, then we may need to program the
3835 * routing regs. We would also need to issue the mailbox
3836 * commands to instruct the MPI what to do per the ethtool
3840 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3841 WAKE_MCAST | WAKE_BCAST)) {
3842 netif_err(qdev, ifdown, qdev->ndev,
3843 "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
3848 if (qdev->wol & WAKE_MAGIC) {
3849 status = ql_mb_wol_set_magic(qdev, 1);
3851 netif_err(qdev, ifdown, qdev->ndev,
3852 "Failed to set magic packet on %s.\n",
3856 netif_info(qdev, drv, qdev->ndev,
3857 "Enabled magic packet successfully on %s.\n",
3860 wol |= MB_WOL_MAGIC_PKT;
3864 wol |= MB_WOL_MODE_ON;
3865 status = ql_mb_wol_mode(qdev, wol);
3866 netif_err(qdev, drv, qdev->ndev,
3867 "WOL %s (wol code 0x%x) on %s\n",
3868 (status == 0) ? "Successfully set" : "Failed",
3869 wol, qdev->ndev->name);
3875 static int ql_adapter_down(struct ql_adapter *qdev)
3881 /* Don't kill the reset worker thread if we
3882 * are in the process of recovery.
3884 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3885 cancel_delayed_work_sync(&qdev->asic_reset_work);
3886 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3887 cancel_delayed_work_sync(&qdev->mpi_work);
3888 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3889 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3890 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3892 for (i = 0; i < qdev->rss_ring_count; i++)
3893 napi_disable(&qdev->rx_ring[i].napi);
3895 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3897 ql_disable_interrupts(qdev);
3899 ql_tx_ring_clean(qdev);
3901 /* Call netif_napi_del() from common point.
3903 for (i = 0; i < qdev->rss_ring_count; i++)
3904 netif_napi_del(&qdev->rx_ring[i].napi);
3906 ql_free_rx_buffers(qdev);
3908 status = ql_adapter_reset(qdev);
3910 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3915 static int ql_adapter_up(struct ql_adapter *qdev)
3919 err = ql_adapter_initialize(qdev);
3921 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3924 set_bit(QL_ADAPTER_UP, &qdev->flags);
3925 ql_alloc_rx_buffers(qdev);
3926 /* If the port is initialized and the
3927 * link is up then turn on the carrier.
3929 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3930 (ql_read32(qdev, STS) & qdev->port_link_up))
3932 ql_enable_interrupts(qdev);
3933 ql_enable_all_completion_interrupts(qdev);
3934 netif_tx_start_all_queues(qdev->ndev);
3938 ql_adapter_reset(qdev);
3942 static void ql_release_adapter_resources(struct ql_adapter *qdev)
3944 ql_free_mem_resources(qdev);
3948 static int ql_get_adapter_resources(struct ql_adapter *qdev)
3952 if (ql_alloc_mem_resources(qdev)) {
3953 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
3956 status = ql_request_irq(qdev);
3960 static int qlge_close(struct net_device *ndev)
3962 struct ql_adapter *qdev = netdev_priv(ndev);
3964 /* If we hit pci_channel_io_perm_failure
3965 * failure condition, then we already
3966 * brought the adapter down.
3968 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
3969 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
3970 clear_bit(QL_EEH_FATAL, &qdev->flags);
3975 * Wait for device to recover from a reset.
3976 * (Rarely happens, but possible.)
3978 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3980 ql_adapter_down(qdev);
3981 ql_release_adapter_resources(qdev);
3985 static int ql_configure_rings(struct ql_adapter *qdev)
3988 struct rx_ring *rx_ring;
3989 struct tx_ring *tx_ring;
3990 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
3991 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
3992 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
3994 qdev->lbq_buf_order = get_order(lbq_buf_len);
3996 /* In a perfect world we have one RSS ring for each CPU
3997 * and each has its own vector. To do that we ask for
3998 * cpu_cnt vectors. ql_enable_msix() will adjust the
3999 * vector count to what we actually get. We then
4000 * allocate an RSS ring for each.
4001 * Essentially, we are doing min(cpu_count, msix_vector_count).
4003 qdev->intr_count = cpu_cnt;
4004 ql_enable_msix(qdev);
4005 /* Adjust the RSS ring count to the actual vector count. */
4006 qdev->rss_ring_count = qdev->intr_count;
4007 qdev->tx_ring_count = cpu_cnt;
4008 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
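/* Worked example (hypothetical counts): on a machine with 8 online CPUs
 * where ql_enable_msix() only secures 4 vectors, this leaves
 * rss_ring_count = 4, tx_ring_count = 8 and rx_ring_count = 12, i.e. one
 * completion queue per RSS ring plus one per TX ring.
 */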
4010 for (i = 0; i < qdev->tx_ring_count; i++) {
4011 tx_ring = &qdev->tx_ring[i];
4012 memset((void *)tx_ring, 0, sizeof(*tx_ring));
4013 tx_ring->qdev = qdev;
4015 tx_ring->wq_len = qdev->tx_ring_size;
4017 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4020 * The completion queue ID for the tx rings start
4021 * immediately after the rss rings.
4023 tx_ring->cq_id = qdev->rss_ring_count + i;
4026 for (i = 0; i < qdev->rx_ring_count; i++) {
4027 rx_ring = &qdev->rx_ring[i];
4028 memset((void *)rx_ring, 0, sizeof(*rx_ring));
4029 rx_ring->qdev = qdev;
4031 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
4032 if (i < qdev->rss_ring_count) {
4034 * Inbound (RSS) queues.
4036 rx_ring->cq_len = qdev->rx_ring_size;
4038 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4039 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4041 rx_ring->lbq_len * sizeof(__le64);
4042 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4043 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
4044 "lbq_buf_size %d, order = %d\n",
4045 rx_ring->lbq_buf_size,
4046 qdev->lbq_buf_order);
4047 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4049 rx_ring->sbq_len * sizeof(__le64);
4050 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4051 rx_ring->type = RX_Q;
4054 * Outbound queue handles outbound completions only.
4056 /* outbound cq is same size as tx_ring it services. */
4057 rx_ring->cq_len = qdev->tx_ring_size;
4059 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4060 rx_ring->lbq_len = 0;
4061 rx_ring->lbq_size = 0;
4062 rx_ring->lbq_buf_size = 0;
4063 rx_ring->sbq_len = 0;
4064 rx_ring->sbq_size = 0;
4065 rx_ring->sbq_buf_size = 0;
4066 rx_ring->type = TX_Q;
4072 static int qlge_open(struct net_device *ndev)
4075 struct ql_adapter *qdev = netdev_priv(ndev);
4077 err = ql_adapter_reset(qdev);
4081 err = ql_configure_rings(qdev);
4085 err = ql_get_adapter_resources(qdev);
4089 err = ql_adapter_up(qdev);
4096 ql_release_adapter_resources(qdev);
4100 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4102 struct rx_ring *rx_ring;
4106 /* Wait for an outstanding reset to complete. */
4107 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4109 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4110 netif_err(qdev, ifup, qdev->ndev,
4111 "Waiting for adapter UP...\n");
4116 netif_err(qdev, ifup, qdev->ndev,
4117 "Timed out waiting for adapter UP\n");
4122 status = ql_adapter_down(qdev);
4126 /* Get the new rx buffer size. */
4127 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4128 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4129 qdev->lbq_buf_order = get_order(lbq_buf_len);
4131 for (i = 0; i < qdev->rss_ring_count; i++) {
4132 rx_ring = &qdev->rx_ring[i];
4133 /* Set the new size. */
4134 rx_ring->lbq_buf_size = lbq_buf_len;
4137 status = ql_adapter_up(qdev);
4143 netif_alert(qdev, ifup, qdev->ndev,
4144 "Driver up/down cycle failed, closing device.\n");
4145 set_bit(QL_ADAPTER_UP, &qdev->flags);
4146 dev_close(qdev->ndev);
4150 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4152 struct ql_adapter *qdev = netdev_priv(ndev);
4155 if (ndev->mtu == 1500 && new_mtu == 9000) {
4156 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4157 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4158 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4162 queue_delayed_work(qdev->workqueue,
4163 &qdev->mpi_port_cfg_work, 3*HZ);
4165 ndev->mtu = new_mtu;
4167 if (!netif_running(qdev->ndev)) {
4171 status = ql_change_rx_buffers(qdev);
4173 netif_err(qdev, ifup, qdev->ndev,
4174 "Changing MTU failed.\n");
4180 static struct net_device_stats *qlge_get_stats(struct net_device
4183 struct ql_adapter *qdev = netdev_priv(ndev);
4184 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4185 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4186 unsigned long pkts, mcast, dropped, errors, bytes;
4190 pkts = mcast = dropped = errors = bytes = 0;
4191 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4192 pkts += rx_ring->rx_packets;
4193 bytes += rx_ring->rx_bytes;
4194 dropped += rx_ring->rx_dropped;
4195 errors += rx_ring->rx_errors;
4196 mcast += rx_ring->rx_multicast;
4198 ndev->stats.rx_packets = pkts;
4199 ndev->stats.rx_bytes = bytes;
4200 ndev->stats.rx_dropped = dropped;
4201 ndev->stats.rx_errors = errors;
4202 ndev->stats.multicast = mcast;
4205 pkts = errors = bytes = 0;
4206 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4207 pkts += tx_ring->tx_packets;
4208 bytes += tx_ring->tx_bytes;
4209 errors += tx_ring->tx_errors;
4211 ndev->stats.tx_packets = pkts;
4212 ndev->stats.tx_bytes = bytes;
4213 ndev->stats.tx_errors = errors;
4214 return &ndev->stats;
4217 static void qlge_set_multicast_list(struct net_device *ndev)
4219 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4220 struct dev_mc_list *mc_ptr;
4223 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4227 * Set or clear promiscuous mode if a
4228 * transition is taking place.
4230 if (ndev->flags & IFF_PROMISC) {
4231 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4232 if (ql_set_routing_reg
4233 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4234 netif_err(qdev, hw, qdev->ndev,
4235 "Failed to set promiscous mode.\n");
4237 set_bit(QL_PROMISCUOUS, &qdev->flags);
4241 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4242 if (ql_set_routing_reg
4243 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4244 netif_err(qdev, hw, qdev->ndev,
4245 "Failed to clear promiscous mode.\n");
4247 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4253 * Set or clear all multicast mode if a
4254 * transition is taking place.
4256 if ((ndev->flags & IFF_ALLMULTI) ||
4257 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4258 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4259 if (ql_set_routing_reg
4260 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4261 netif_err(qdev, hw, qdev->ndev,
4262 "Failed to set all-multi mode.\n");
4264 set_bit(QL_ALLMULTI, &qdev->flags);
4268 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4269 if (ql_set_routing_reg
4270 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4271 netif_err(qdev, hw, qdev->ndev,
4272 "Failed to clear all-multi mode.\n");
4274 clear_bit(QL_ALLMULTI, &qdev->flags);
4279 if (!netdev_mc_empty(ndev)) {
4280 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4283 for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
4284 i++, mc_ptr = mc_ptr->next)
4285 if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
4286 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4287 netif_err(qdev, hw, qdev->ndev,
4288 "Failed to loadmulticast address.\n");
4289 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4292 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4293 if (ql_set_routing_reg
4294 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4295 netif_err(qdev, hw, qdev->ndev,
4296 "Failed to set multicast match mode.\n");
4298 set_bit(QL_ALLMULTI, &qdev->flags);
4302 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4305 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4307 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4308 struct sockaddr *addr = p;
4311 if (!is_valid_ether_addr(addr->sa_data))
4312 return -EADDRNOTAVAIL;
4313 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4315 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4318 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4319 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4321 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4322 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4326 static void qlge_tx_timeout(struct net_device *ndev)
4328 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4329 ql_queue_asic_error(qdev);
4332 static void ql_asic_reset_work(struct work_struct *work)
4334 struct ql_adapter *qdev =
4335 container_of(work, struct ql_adapter, asic_reset_work.work);
4338 status = ql_adapter_down(qdev);
4342 status = ql_adapter_up(qdev);
4346 /* Restore rx mode. */
4347 clear_bit(QL_ALLMULTI, &qdev->flags);
4348 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4349 qlge_set_multicast_list(qdev->ndev);
4354 netif_alert(qdev, ifup, qdev->ndev,
4355 "Driver up/down cycle failed, closing device\n");
4357 set_bit(QL_ADAPTER_UP, &qdev->flags);
4358 dev_close(qdev->ndev);
4362 static struct nic_operations qla8012_nic_ops = {
4363 .get_flash = ql_get_8012_flash_params,
4364 .port_initialize = ql_8012_port_initialize,
4367 static struct nic_operations qla8000_nic_ops = {
4368 .get_flash = ql_get_8000_flash_params,
4369 .port_initialize = ql_8000_port_initialize,
4372 /* Find the pcie function number for the other NIC
4373 * on this chip. Since both NIC functions share a
4374 * common firmware we have the lowest enabled function
4375 * do any common work. Examples would be resetting
4376 * after a fatal firmware error, or doing a firmware
4379 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4383 u32 nic_func1, nic_func2;
4385 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4390 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4391 MPI_TEST_NIC_FUNC_MASK);
4392 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4393 MPI_TEST_NIC_FUNC_MASK);
4395 if (qdev->func == nic_func1)
4396 qdev->alt_func = nic_func2;
4397 else if (qdev->func == nic_func2)
4398 qdev->alt_func = nic_func1;
4405 static int ql_get_board_info(struct ql_adapter *qdev)
4409 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4413 status = ql_get_alt_pcie_func(qdev);
4417 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4419 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4420 qdev->port_link_up = STS_PL1;
4421 qdev->port_init = STS_PI1;
4422 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4423 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4425 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4426 qdev->port_link_up = STS_PL0;
4427 qdev->port_init = STS_PI0;
4428 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4429 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4431 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4432 qdev->device_id = qdev->pdev->device;
4433 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4434 qdev->nic_ops = &qla8012_nic_ops;
4435 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4436 qdev->nic_ops = &qla8000_nic_ops;
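/* Undo what ql_init_device() set up: destroy the workqueue, unmap
 * the register and doorbell BARs, free any MPI coredump buffer and
 * release the PCI regions.
 */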
4440 static void ql_release_all(struct pci_dev *pdev)
4442 struct net_device *ndev = pci_get_drvdata(pdev);
4443 struct ql_adapter *qdev = netdev_priv(ndev);
4445 if (qdev->workqueue) {
4446 destroy_workqueue(qdev->workqueue);
4447 qdev->workqueue = NULL;
4451 iounmap(qdev->reg_base);
4452 if (qdev->doorbell_area)
4453 iounmap(qdev->doorbell_area);
4454 vfree(qdev->mpi_coredump);
4455 pci_release_regions(pdev);
4456 pci_set_drvdata(pdev, NULL);
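/* One-time PCI and software initialization done at probe time:
 * enable the device, set the DMA masks, map BAR1 (registers) and
 * BAR3 (doorbells), read the board configuration and flash, set the
 * default ring and coalescing parameters, and create the work items.
 */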
4459 static int __devinit ql_init_device(struct pci_dev *pdev,
4460 struct net_device *ndev, int cards_found)
4462 struct ql_adapter *qdev = netdev_priv(ndev);
4465 memset((void *)qdev, 0, sizeof(*qdev));
4466 err = pci_enable_device(pdev);
4468 dev_err(&pdev->dev, "PCI device enable failed.\n");
4474 pci_set_drvdata(pdev, ndev);
4476 /* Set PCIe read request size */
4477 err = pcie_set_readrq(pdev, 4096);
4479 dev_err(&pdev->dev, "Set readrq failed.\n");
4483 err = pci_request_regions(pdev, DRV_NAME);
4485 dev_err(&pdev->dev, "PCI region request failed.\n");
4489 pci_set_master(pdev);
4490 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4491 set_bit(QL_DMA64, &qdev->flags);
4492 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4494 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4496 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4500 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4504 /* Set PCIe reset type for EEH to fundamental. */
4505 pdev->needs_freset = 1;
4506 pci_save_state(pdev);
4507 qdev->reg_base =
4508 ioremap_nocache(pci_resource_start(pdev, 1),
4509 pci_resource_len(pdev, 1));
4510 if (!qdev->reg_base) {
4511 dev_err(&pdev->dev, "Register mapping failed.\n");
4516 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4517 qdev->doorbell_area =
4518 ioremap_nocache(pci_resource_start(pdev, 3),
4519 pci_resource_len(pdev, 3));
4520 if (!qdev->doorbell_area) {
4521 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4526 err = ql_get_board_info(qdev);
4528 dev_err(&pdev->dev, "Register access failed.\n");
4532 qdev->msg_enable = netif_msg_init(debug, default_msg);
4533 spin_lock_init(&qdev->hw_lock);
4534 spin_lock_init(&qdev->stats_lock);
4536 if (qlge_mpi_coredump) {
4537 qdev->mpi_coredump =
4538 vmalloc(sizeof(struct ql_mpi_coredump));
4539 if (qdev->mpi_coredump == NULL) {
4540 dev_err(&pdev->dev, "Coredump alloc failed.\n");
4544 if (qlge_force_coredump)
4545 set_bit(QL_FRC_COREDUMP, &qdev->flags);
4547 /* make sure the EEPROM is good */
4548 err = qdev->nic_ops->get_flash(qdev);
4550 dev_err(&pdev->dev, "Invalid FLASH.\n");
4554 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4556 /* Set up the default ring sizes. */
4557 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4558 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4560 /* Set up the coalescing parameters. */
4561 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4562 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4563 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4564 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4567 * Set up the operating parameters.
4570 qdev->workqueue = create_singlethread_workqueue(ndev->name);
4571 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4572 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4573 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4574 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4575 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4576 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4577 init_completion(&qdev->ide_completion);
4580 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4581 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4582 DRV_NAME, DRV_VERSION);
4586 ql_release_all(pdev);
4588 pci_disable_device(pdev);
4592 static const struct net_device_ops qlge_netdev_ops = {
4593 .ndo_open = qlge_open,
4594 .ndo_stop = qlge_close,
4595 .ndo_start_xmit = qlge_send,
4596 .ndo_change_mtu = qlge_change_mtu,
4597 .ndo_get_stats = qlge_get_stats,
4598 .ndo_set_multicast_list = qlge_set_multicast_list,
4599 .ndo_set_mac_address = qlge_set_mac_address,
4600 .ndo_validate_addr = eth_validate_addr,
4601 .ndo_tx_timeout = qlge_tx_timeout,
4602 .ndo_vlan_rx_register = qlge_vlan_rx_register,
4603 .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
4604 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
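/* Periodic (5 second) status-register read. The read itself is what
 * provokes EEH detection if the PCI bus has gone dead; when the
 * channel is already offline the status value is only logged.
 */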
4607 static void ql_timer(unsigned long data)
4609 struct ql_adapter *qdev = (struct ql_adapter *)data;
4612 var = ql_read32(qdev, STS);
4613 if (pci_channel_offline(qdev->pdev)) {
4614 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4618 qdev->timer.expires = jiffies + (5*HZ);
4619 add_timer(&qdev->timer);
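/* PCI probe entry point: allocate the multiqueue net_device, run the
 * one-time device initialization, advertise offload features,
 * register the netdev and start the EEH detection timer.
 */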
4622 static int __devinit qlge_probe(struct pci_dev *pdev,
4623 const struct pci_device_id *pci_entry)
4625 struct net_device *ndev = NULL;
4626 struct ql_adapter *qdev = NULL;
4627 static int cards_found = 0;
4630 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4631 min(MAX_CPUS, (int)num_online_cpus()));
4635 err = ql_init_device(pdev, ndev, cards_found);
4641 qdev = netdev_priv(ndev);
4642 SET_NETDEV_DEV(ndev, &pdev->dev);
4649 | NETIF_F_HW_VLAN_TX
4650 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
4651 ndev->features |= NETIF_F_GRO;
4653 if (test_bit(QL_DMA64, &qdev->flags))
4654 ndev->features |= NETIF_F_HIGHDMA;
4657 * Set up net_device structure.
4659 ndev->tx_queue_len = qdev->tx_ring_size;
4660 ndev->irq = pdev->irq;
4662 ndev->netdev_ops = &qlge_netdev_ops;
4663 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
4664 ndev->watchdog_timeo = 10 * HZ;
4666 err = register_netdev(ndev);
4668 dev_err(&pdev->dev, "net device registration failed.\n");
4669 ql_release_all(pdev);
4670 pci_disable_device(pdev);
4673 /* Start up the timer to trigger EEH if
4674 * the bus goes dead.
4675 */
4676 init_timer_deferrable(&qdev->timer);
4677 qdev->timer.data = (unsigned long)qdev;
4678 qdev->timer.function = ql_timer;
4679 qdev->timer.expires = jiffies + (5*HZ);
4680 add_timer(&qdev->timer);
4682 ql_display_dev_info(ndev);
4683 atomic_set(&qdev->lb_count, 0);
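/* Non-static wrappers around the normal transmit and RX-clean paths.
 * Judging by the names and the lb_count counter above, they serve a
 * loopback self-test implemented elsewhere in the driver.
 */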
4688 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4690 return qlge_send(skb, ndev);
4693 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4695 return ql_clean_inbound_rx_ring(rx_ring, budget);
4698 static void __devexit qlge_remove(struct pci_dev *pdev)
4700 struct net_device *ndev = pci_get_drvdata(pdev);
4701 struct ql_adapter *qdev = netdev_priv(ndev);
4702 del_timer_sync(&qdev->timer);
4703 unregister_netdev(ndev);
4704 ql_release_all(pdev);
4705 pci_disable_device(pdev);
4709 /* Clean up resources without touching hardware. */
4710 static void ql_eeh_close(struct net_device *ndev)
4713 struct ql_adapter *qdev = netdev_priv(ndev);
4715 if (netif_carrier_ok(ndev)) {
4716 netif_carrier_off(ndev);
4717 netif_stop_queue(ndev);
4720 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
4721 cancel_delayed_work_sync(&qdev->asic_reset_work);
4722 cancel_delayed_work_sync(&qdev->mpi_reset_work);
4723 cancel_delayed_work_sync(&qdev->mpi_work);
4724 cancel_delayed_work_sync(&qdev->mpi_idc_work);
4725 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
4726 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
4728 for (i = 0; i < qdev->rss_ring_count; i++)
4729 netif_napi_del(&qdev->rx_ring[i].napi);
4731 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4732 ql_tx_ring_clean(qdev);
4733 ql_free_rx_buffers(qdev);
4734 ql_release_adapter_resources(qdev);
4738 * This callback is called by the PCI subsystem whenever
4739 * a PCI bus error is detected.
4741 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4742 enum pci_channel_state state)
4744 struct net_device *ndev = pci_get_drvdata(pdev);
4745 struct ql_adapter *qdev = netdev_priv(ndev);
4748 case pci_channel_io_normal:
4749 return PCI_ERS_RESULT_CAN_RECOVER;
4750 case pci_channel_io_frozen:
4751 netif_device_detach(ndev);
4752 if (netif_running(ndev))
4754 pci_disable_device(pdev);
4755 return PCI_ERS_RESULT_NEED_RESET;
4756 case pci_channel_io_perm_failure:
4758 "%s: pci_channel_io_perm_failure.\n", __func__);
4760 set_bit(QL_EEH_FATAL, &qdev->flags);
4761 return PCI_ERS_RESULT_DISCONNECT;
4764 /* Request a slot reset. */
4765 return PCI_ERS_RESULT_NEED_RESET;
4769 * This callback is called after the PCI bus has been reset.
4770 * Basically, this tries to restart the card from scratch.
4771 * This is a shortened version of the device probe/discovery code,
4772 * it resembles the first half of the probe routine.
4774 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4776 struct net_device *ndev = pci_get_drvdata(pdev);
4777 struct ql_adapter *qdev = netdev_priv(ndev);
4779 pdev->error_state = pci_channel_io_normal;
4781 pci_restore_state(pdev);
4782 if (pci_enable_device(pdev)) {
4783 netif_err(qdev, ifup, qdev->ndev,
4784 "Cannot re-enable PCI device after reset.\n");
4785 return PCI_ERS_RESULT_DISCONNECT;
4787 pci_set_master(pdev);
4789 if (ql_adapter_reset(qdev)) {
4790 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4791 set_bit(QL_EEH_FATAL, &qdev->flags);
4792 return PCI_ERS_RESULT_DISCONNECT;
4795 return PCI_ERS_RESULT_RECOVERED;
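/* Final stage of EEH recovery: the slot has been reset, so reopen
 * the interface if it was running before the error, restart the
 * status timer and reattach the device.
 */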
4798 static void qlge_io_resume(struct pci_dev *pdev)
4800 struct net_device *ndev = pci_get_drvdata(pdev);
4801 struct ql_adapter *qdev = netdev_priv(ndev);
4804 if (netif_running(ndev)) {
4805 err = qlge_open(ndev);
4807 netif_err(qdev, ifup, qdev->ndev,
4808 "Device initialization failed after reset.\n");
4812 netif_err(qdev, ifup, qdev->ndev,
4813 "Device was not running prior to EEH.\n");
4815 qdev->timer.expires = jiffies + (5*HZ);
4816 add_timer(&qdev->timer);
4817 netif_device_attach(ndev);
4820 static struct pci_error_handlers qlge_err_handler = {
4821 .error_detected = qlge_io_error_detected,
4822 .slot_reset = qlge_io_slot_reset,
4823 .resume = qlge_io_resume,
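/* Power management: detach the netdev, stop the status timer, bring
 * the adapter down if it was running, then save PCI state and drop
 * to the requested power state.
 */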
4826 static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4828 struct net_device *ndev = pci_get_drvdata(pdev);
4829 struct ql_adapter *qdev = netdev_priv(ndev);
4832 netif_device_detach(ndev);
4833 del_timer_sync(&qdev->timer);
4835 if (netif_running(ndev)) {
4836 err = ql_adapter_down(qdev);
4842 err = pci_save_state(pdev);
4846 pci_disable_device(pdev);
4848 pci_set_power_state(pdev, pci_choose_state(pdev, state));
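/* Resume: restore power and PCI state, re-enable the device, clear
 * any wake settings, bring the adapter back up if it was running and
 * restart the status timer.
 */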
4854 static int qlge_resume(struct pci_dev *pdev)
4856 struct net_device *ndev = pci_get_drvdata(pdev);
4857 struct ql_adapter *qdev = netdev_priv(ndev);
4860 pci_set_power_state(pdev, PCI_D0);
4861 pci_restore_state(pdev);
4862 err = pci_enable_device(pdev);
4864 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
4867 pci_set_master(pdev);
4869 pci_enable_wake(pdev, PCI_D3hot, 0);
4870 pci_enable_wake(pdev, PCI_D3cold, 0);
4872 if (netif_running(ndev)) {
4873 err = ql_adapter_up(qdev);
4878 qdev->timer.expires = jiffies + (5*HZ);
4879 add_timer(&qdev->timer);
4880 netif_device_attach(ndev);
4884 #endif /* CONFIG_PM */
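/* Shutdown reuses the suspend path to quiesce the hardware. */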
4886 static void qlge_shutdown(struct pci_dev *pdev)
4888 qlge_suspend(pdev, PMSG_SUSPEND);
4891 static struct pci_driver qlge_driver = {
4892 .name = qlge_driver_name,
4893 .id_table = qlge_pci_tbl,
4894 .probe = qlge_probe,
4895 .remove = __devexit_p(qlge_remove),
4897 .suspend = qlge_suspend,
4898 .resume = qlge_resume,
4900 .shutdown = qlge_shutdown,
4901 .err_handler = &qlge_err_handler
4904 static int __init qlge_init_module(void)
4906 return pci_register_driver(&qlge_driver);
4909 static void __exit qlge_exit(void)
4911 pci_unregister_driver(&qlge_driver);
4914 module_init(qlge_init_module);
4915 module_exit(qlge_exit);