2 * Linux network driver for Brocade Converged Network Adapter.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
18 #include <linux/bitops.h>
19 #include <linux/netdevice.h>
20 #include <linux/skbuff.h>
21 #include <linux/etherdevice.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_vlan.h>
25 #include <linux/if_ether.h>
27 #include <linux/prefetch.h>
28 #include <linux/module.h>
34 static DEFINE_MUTEX(bnad_fwimg_mutex);
39 static uint bnad_msix_disable;
40 module_param(bnad_msix_disable, uint, 0444);
41 MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
43 static uint bnad_ioc_auto_recover = 1;
44 module_param(bnad_ioc_auto_recover, uint, 0444);
45 MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
47 static uint bna_debugfs_enable = 1;
48 module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR);
49 MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
50 " Range[false:0|true:1]");
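/*
 * Note: permissions 0444 expose a module parameter read-only through sysfs;
 * bna_debugfs_enable uses S_IRUGO | S_IWUSR so root can also toggle it at
 * runtime.
 */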
55 u32 bnad_rxqs_per_cq = 2;
57 static struct mutex bnad_list_mutex;
58 static LIST_HEAD(bnad_list);
59 static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
64 #define BNAD_GET_MBOX_IRQ(_bnad) \
65 (((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \
66 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
67 ((_bnad)->pcidev->irq))
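/*
 * BNAD_GET_MBOX_IRQ resolves to the dedicated mailbox MSI-X vector when
 * MSI-X is enabled, and to the legacy PCI line (pcidev->irq) otherwise.
 */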
69 #define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size) \
71 (_res_info)->res_type = BNA_RES_T_MEM; \
72 (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \
73 (_res_info)->res_u.mem_info.num = (_num); \
74 (_res_info)->res_u.mem_info.len = (_size); \
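/*
 * BNAD_FILL_UNMAPQ_MEM_REQ requests (_num) kernel-virtual (KVA) memory
 * blocks of (_size) bytes each; these blocks back the per-queue unmap
 * arrays used to track DMA mappings.
 */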
78 bnad_add_to_list(struct bnad *bnad)
80 mutex_lock(&bnad_list_mutex);
81 list_add_tail(&bnad->list_entry, &bnad_list);
83 mutex_unlock(&bnad_list_mutex);
87 bnad_remove_from_list(struct bnad *bnad)
89 mutex_lock(&bnad_list_mutex);
90 list_del(&bnad->list_entry);
91 mutex_unlock(&bnad_list_mutex);
95 * Reinitialize completions in CQ, once Rx is taken down
98 bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
100 struct bna_cq_entry *cmpl;
103 for (i = 0; i < ccb->q_depth; i++) {
104 cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i];
109 /* Tx Datapath functions */
112 /* Caller should ensure that the entry at unmap_q[index] is valid */
114 bnad_tx_buff_unmap(struct bnad *bnad,
115 struct bnad_tx_unmap *unmap_q,
116 u32 q_depth, u32 index)
118 struct bnad_tx_unmap *unmap;
122 unmap = &unmap_q[index];
123 nvecs = unmap->nvecs;
128 dma_unmap_single(&bnad->pcidev->dev,
129 dma_unmap_addr(&unmap->vectors[0], dma_addr),
130 skb_headlen(skb), DMA_TO_DEVICE);
131 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
137 if (vector == BFI_TX_MAX_VECTORS_PER_WI) {
139 BNA_QE_INDX_INC(index, q_depth);
140 unmap = &unmap_q[index];
143 dma_unmap_page(&bnad->pcidev->dev,
144 dma_unmap_addr(&unmap->vectors[vector], dma_addr),
145 skb_shinfo(skb)->frags[nvecs].size, DMA_TO_DEVICE);
146 dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
150 BNA_QE_INDX_INC(index, q_depth);
156 * Frees all pending Tx Bufs
157 * At this point no activity is expected on the Q,
158 * so DMA unmap & freeing is fine.
161 bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
163 struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
167 for (i = 0; i < tcb->q_depth; i++) {
168 skb = unmap_q[i].skb;
171 bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);
173 dev_kfree_skb_any(skb);
178 * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
179 * Can be called in a) Interrupt context b) Sending context
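/*
 * Flow: snapshot the hardware consumer index, convert its distance from the
 * software consumer index into work items, unmap and free each sent skb,
 * then publish the new consumer index and the per-queue packet/byte counters.
 */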
183 bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
185 u32 sent_packets = 0, sent_bytes = 0;
186 u32 wis, unmap_wis, hw_cons, cons, q_depth;
187 struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
188 struct bnad_tx_unmap *unmap;
191 /* Just return if TX is stopped */
192 if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
195 hw_cons = *(tcb->hw_consumer_index);
196 cons = tcb->consumer_index;
197 q_depth = tcb->q_depth;
199 wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
200 BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
203 unmap = &unmap_q[cons];
208 sent_bytes += skb->len;
210 unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs);
213 cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
214 dev_kfree_skb_any(skb);
217 /* Update consumer pointers. */
218 tcb->consumer_index = hw_cons;
220 tcb->txq->tx_packets += sent_packets;
221 tcb->txq->tx_bytes += sent_bytes;
227 bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
229 struct net_device *netdev = bnad->netdev;
232 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
235 sent = bnad_txcmpl_process(bnad, tcb);
237 if (netif_queue_stopped(netdev) &&
238 netif_carrier_ok(netdev) &&
239 BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
240 BNAD_NETIF_WAKE_THRESHOLD) {
241 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
242 netif_wake_queue(netdev);
243 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
248 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
249 bna_ib_ack(tcb->i_dbell, sent);
251 smp_mb__before_clear_bit();
252 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
257 /* MSIX Tx Completion Handler */
259 bnad_msix_tx(int irq, void *data)
261 struct bna_tcb *tcb = (struct bna_tcb *)data;
262 struct bnad *bnad = tcb->bnad;
264 bnad_tx_complete(bnad, tcb);
270 bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
272 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
274 unmap_q->reuse_pi = -1;
275 unmap_q->alloc_order = -1;
276 unmap_q->map_size = 0;
277 unmap_q->type = BNAD_RXBUF_NONE;
280 /* Default is page-based allocation. Multi-buffer support - TBD */
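/*
 * Large-buffer RxQs carve a compound page of get_order(mtu) into map_size
 * chunks; small-buffer RxQs stay with order-0 pages mapped at
 * rxq->buffer_size.
 */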
282 bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
284 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
287 bnad_rxq_alloc_uninit(bnad, rcb);
289 mtu = bna_enet_mtu_get(&bnad->bna.enet);
290 order = get_order(mtu);
292 if (bna_is_small_rxq(rcb->id)) {
293 unmap_q->alloc_order = 0;
294 unmap_q->map_size = rcb->rxq->buffer_size;
296 unmap_q->alloc_order = order;
298 (rcb->rxq->buffer_size > 2048) ?
299 PAGE_SIZE << order : 2048;
302 BUG_ON(((PAGE_SIZE << order) % unmap_q->map_size));
304 unmap_q->type = BNAD_RXBUF_PAGE;
310 bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
315 dma_unmap_page(&bnad->pcidev->dev,
316 dma_unmap_addr(&unmap->vector, dma_addr),
317 unmap->vector.len, DMA_FROM_DEVICE);
318 put_page(unmap->page);
320 dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
321 unmap->vector.len = 0;
325 bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
330 dma_unmap_single(&bnad->pcidev->dev,
331 dma_unmap_addr(&unmap->vector, dma_addr),
332 unmap->vector.len, DMA_FROM_DEVICE);
333 dev_kfree_skb_any(unmap->skb);
335 dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
336 unmap->vector.len = 0;
340 bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
342 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
345 for (i = 0; i < rcb->q_depth; i++) {
346 struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
348 if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
349 bnad_rxq_cleanup_page(bnad, unmap);
351 bnad_rxq_cleanup_skb(bnad, unmap);
353 bnad_rxq_alloc_uninit(bnad, rcb);
357 bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
359 u32 alloced, prod, q_depth;
360 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
361 struct bnad_rx_unmap *unmap, *prev;
362 struct bna_rxq_entry *rxent;
364 u32 page_offset, alloc_size;
367 prod = rcb->producer_index;
368 q_depth = rcb->q_depth;
370 alloc_size = PAGE_SIZE << unmap_q->alloc_order;
374 unmap = &unmap_q->unmap[prod];
376 if (unmap_q->reuse_pi < 0) {
377 page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
378 unmap_q->alloc_order);
381 prev = &unmap_q->unmap[unmap_q->reuse_pi];
383 page_offset = prev->page_offset + unmap_q->map_size;
387 if (unlikely(!page)) {
388 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
389 rcb->rxq->rxbuf_alloc_failed++;
393 dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
394 unmap_q->map_size, DMA_FROM_DEVICE);
397 unmap->page_offset = page_offset;
398 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
399 unmap->vector.len = unmap_q->map_size;
400 page_offset += unmap_q->map_size;
402 if (page_offset < alloc_size)
403 unmap_q->reuse_pi = prod;
405 unmap_q->reuse_pi = -1;
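/*
 * reuse_pi remembers the entry whose compound page still has room for
 * another map_size chunk; -1 forces a fresh alloc_pages() on the next
 * iteration.
 */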
407 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
408 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
409 BNA_QE_INDX_INC(prod, q_depth);
414 if (likely(alloced)) {
415 rcb->producer_index = prod;
417 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
418 bna_rxq_prod_indx_doorbell(rcb);
425 bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
427 u32 alloced, prod, q_depth, buff_sz;
428 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
429 struct bnad_rx_unmap *unmap;
430 struct bna_rxq_entry *rxent;
434 buff_sz = rcb->rxq->buffer_size;
435 prod = rcb->producer_index;
436 q_depth = rcb->q_depth;
440 unmap = &unmap_q->unmap[prod];
442 skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);
444 if (unlikely(!skb)) {
445 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
446 rcb->rxq->rxbuf_alloc_failed++;
449 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
450 buff_sz, DMA_FROM_DEVICE);
453 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
454 unmap->vector.len = buff_sz;
456 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
457 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
458 BNA_QE_INDX_INC(prod, q_depth);
463 if (likely(alloced)) {
464 rcb->producer_index = prod;
466 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
467 bna_rxq_prod_indx_doorbell(rcb);
474 bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
476 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
479 to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth);
480 if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
483 if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
484 bnad_rxq_refill_page(bnad, rcb, to_alloc);
486 bnad_rxq_refill_skb(bnad, rcb, to_alloc);
489 #define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
491 BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
492 BNA_CQ_EF_L4_CKSUM_OK)
494 #define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
495 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
496 #define flags_tcp6 (BNA_CQ_EF_IPV6 | \
497 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
498 #define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
499 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
500 #define flags_udp6 (BNA_CQ_EF_IPV6 | \
501 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
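/*
 * These masks isolate the protocol/checksum bits of the completion flags;
 * a match against one of the tcp4/tcp6/udp4/udp6 patterns lets the Rx path
 * mark the skb CHECKSUM_UNNECESSARY.
 */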
503 static inline struct sk_buff *
504 bnad_cq_prepare_skb(struct bnad_rx_ctrl *rx_ctrl,
505 struct bnad_rx_unmap_q *unmap_q,
506 struct bnad_rx_unmap *unmap,
507 u32 length, u32 flags)
509 struct bnad *bnad = rx_ctrl->bnad;
512 if (BNAD_RXBUF_IS_PAGE(unmap_q->type)) {
513 skb = napi_get_frags(&rx_ctrl->napi);
517 dma_unmap_page(&bnad->pcidev->dev,
518 dma_unmap_addr(&unmap->vector, dma_addr),
519 unmap->vector.len, DMA_FROM_DEVICE);
520 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
521 unmap->page, unmap->page_offset, length);
523 skb->data_len += length;
524 skb->truesize += length;
527 unmap->vector.len = 0;
535 dma_unmap_single(&bnad->pcidev->dev,
536 dma_unmap_addr(&unmap->vector, dma_addr),
537 unmap->vector.len, DMA_FROM_DEVICE);
539 skb_put(skb, length);
541 skb->protocol = eth_type_trans(skb, bnad->netdev);
544 unmap->vector.len = 0;
549 bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
551 struct bna_cq_entry *cq, *cmpl;
552 struct bna_rcb *rcb = NULL;
553 struct bnad_rx_unmap_q *unmap_q;
554 struct bnad_rx_unmap *unmap;
556 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
557 struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
558 u32 packets = 0, length = 0, flags, masked_flags;
560 prefetch(bnad->netdev);
563 cmpl = &cq[ccb->producer_index];
565 while (cmpl->valid && (packets < budget)) {
567 flags = ntohl(cmpl->flags);
568 length = ntohs(cmpl->length);
569 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
571 if (bna_is_small_rxq(cmpl->rxq_id))
576 unmap_q = rcb->unmap_q;
577 unmap = &unmap_q->unmap[rcb->consumer_index];
579 if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
580 BNA_CQ_EF_FCS_ERROR |
581 BNA_CQ_EF_TOO_LONG))) {
582 if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
583 bnad_rxq_cleanup_page(bnad, unmap);
585 bnad_rxq_cleanup_skb(bnad, unmap);
587 rcb->rxq->rx_packets_with_error++;
591 skb = bnad_cq_prepare_skb(ccb->ctrl, unmap_q, unmap,
597 masked_flags = flags & flags_cksum_prot_mask;
600 ((bnad->netdev->features & NETIF_F_RXCSUM) &&
601 ((masked_flags == flags_tcp4) ||
602 (masked_flags == flags_udp4) ||
603 (masked_flags == flags_tcp6) ||
604 (masked_flags == flags_udp6))))
605 skb->ip_summed = CHECKSUM_UNNECESSARY;
607 skb_checksum_none_assert(skb);
609 rcb->rxq->rx_packets++;
610 rcb->rxq->rx_bytes += length;
612 if (flags & BNA_CQ_EF_VLAN)
613 __vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag));
615 if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
616 napi_gro_frags(&rx_ctrl->napi);
618 netif_receive_skb(skb);
622 BNA_QE_INDX_INC(rcb->consumer_index, rcb->q_depth);
623 BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
624 cmpl = &cq[ccb->producer_index];
627 napi_gro_flush(&rx_ctrl->napi, false);
628 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
629 bna_ib_ack_disable_irq(ccb->i_dbell, packets);
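/*
 * The IB is acked with the number of completions consumed and its interrupt
 * left disabled; the NAPI poller re-enables the Rx interrupt once it
 * completes (see bnad_napi_poll_rx).
 */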
631 bnad_rxq_post(bnad, ccb->rcb[0]);
633 bnad_rxq_post(bnad, ccb->rcb[1]);
639 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
641 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
642 struct napi_struct *napi = &rx_ctrl->napi;
644 if (likely(napi_schedule_prep(napi))) {
645 __napi_schedule(napi);
646 rx_ctrl->rx_schedule++;
650 /* MSIX Rx Path Handler */
652 bnad_msix_rx(int irq, void *data)
654 struct bna_ccb *ccb = (struct bna_ccb *)data;
657 ((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
658 bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
664 /* Interrupt handlers */
666 /* Mbox Interrupt Handlers */
668 bnad_msix_mbox_handler(int irq, void *data)
672 struct bnad *bnad = (struct bnad *)data;
674 spin_lock_irqsave(&bnad->bna_lock, flags);
675 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
676 spin_unlock_irqrestore(&bnad->bna_lock, flags);
680 bna_intr_status_get(&bnad->bna, intr_status);
682 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
683 bna_mbox_handler(&bnad->bna, intr_status);
685 spin_unlock_irqrestore(&bnad->bna_lock, flags);
691 bnad_isr(int irq, void *data)
696 struct bnad *bnad = (struct bnad *)data;
697 struct bnad_rx_info *rx_info;
698 struct bnad_rx_ctrl *rx_ctrl;
699 struct bna_tcb *tcb = NULL;
701 spin_lock_irqsave(&bnad->bna_lock, flags);
702 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
703 spin_unlock_irqrestore(&bnad->bna_lock, flags);
707 bna_intr_status_get(&bnad->bna, intr_status);
709 if (unlikely(!intr_status)) {
710 spin_unlock_irqrestore(&bnad->bna_lock, flags);
714 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
715 bna_mbox_handler(&bnad->bna, intr_status);
717 spin_unlock_irqrestore(&bnad->bna_lock, flags);
719 if (!BNA_IS_INTX_DATA_INTR(intr_status))
722 /* Process data interrupts */
724 for (i = 0; i < bnad->num_tx; i++) {
725 for (j = 0; j < bnad->num_txq_per_tx; j++) {
726 tcb = bnad->tx_info[i].tcb[j];
727 if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
728 bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
732 for (i = 0; i < bnad->num_rx; i++) {
733 rx_info = &bnad->rx_info[i];
736 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
737 rx_ctrl = &rx_info->rx_ctrl[j];
739 bnad_netif_rx_schedule_poll(bnad,
747 * Called in interrupt / callback context
748 * with bna_lock held, so cfg_flags access is OK
751 bnad_enable_mbox_irq(struct bnad *bnad)
753 clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
755 BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
759 * Called with bnad->bna_lock held because of
760 * bnad->cfg_flags access.
763 bnad_disable_mbox_irq(struct bnad *bnad)
765 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
767 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
771 bnad_set_netdev_perm_addr(struct bnad *bnad)
773 struct net_device *netdev = bnad->netdev;
775 memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
776 if (is_zero_ether_addr(netdev->dev_addr))
777 memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
780 /* Control Path Handlers */
784 bnad_cb_mbox_intr_enable(struct bnad *bnad)
786 bnad_enable_mbox_irq(bnad);
790 bnad_cb_mbox_intr_disable(struct bnad *bnad)
792 bnad_disable_mbox_irq(bnad);
796 bnad_cb_ioceth_ready(struct bnad *bnad)
798 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
799 complete(&bnad->bnad_completions.ioc_comp);
803 bnad_cb_ioceth_failed(struct bnad *bnad)
805 bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
806 complete(&bnad->bnad_completions.ioc_comp);
810 bnad_cb_ioceth_disabled(struct bnad *bnad)
812 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
813 complete(&bnad->bnad_completions.ioc_comp);
817 bnad_cb_enet_disabled(void *arg)
819 struct bnad *bnad = (struct bnad *)arg;
821 netif_carrier_off(bnad->netdev);
822 complete(&bnad->bnad_completions.enet_comp);
826 bnad_cb_ethport_link_status(struct bnad *bnad,
827 enum bna_link_status link_status)
829 bool link_up = false;
831 link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
833 if (link_status == BNA_CEE_UP) {
834 if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
835 BNAD_UPDATE_CTR(bnad, cee_toggle);
836 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
838 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
839 BNAD_UPDATE_CTR(bnad, cee_toggle);
840 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
844 if (!netif_carrier_ok(bnad->netdev)) {
846 printk(KERN_WARNING "bna: %s link up\n",
848 netif_carrier_on(bnad->netdev);
849 BNAD_UPDATE_CTR(bnad, link_toggle);
850 for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
851 for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
853 struct bna_tcb *tcb =
854 bnad->tx_info[tx_id].tcb[tcb_id];
861 if (test_bit(BNAD_TXQ_TX_STARTED,
865 /* Force an immediate Transmit Schedule */
866 printk(KERN_INFO "bna: %s %d "
873 BNAD_UPDATE_CTR(bnad,
879 BNAD_UPDATE_CTR(bnad,
886 if (netif_carrier_ok(bnad->netdev)) {
887 printk(KERN_WARNING "bna: %s link down\n",
889 netif_carrier_off(bnad->netdev);
890 BNAD_UPDATE_CTR(bnad, link_toggle);
896 bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
898 struct bnad *bnad = (struct bnad *)arg;
900 complete(&bnad->bnad_completions.tx_comp);
904 bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
906 struct bnad_tx_info *tx_info =
907 (struct bnad_tx_info *)tcb->txq->tx->priv;
910 tx_info->tcb[tcb->id] = tcb;
914 bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
916 struct bnad_tx_info *tx_info =
917 (struct bnad_tx_info *)tcb->txq->tx->priv;
919 tx_info->tcb[tcb->id] = NULL;
924 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
926 struct bnad_rx_info *rx_info =
927 (struct bnad_rx_info *)ccb->cq->rx->priv;
929 rx_info->rx_ctrl[ccb->id].ccb = ccb;
930 ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
934 bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
936 struct bnad_rx_info *rx_info =
937 (struct bnad_rx_info *)ccb->cq->rx->priv;
939 rx_info->rx_ctrl[ccb->id].ccb = NULL;
943 bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
945 struct bnad_tx_info *tx_info =
946 (struct bnad_tx_info *)tx->priv;
951 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
952 tcb = tx_info->tcb[i];
956 clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
957 netif_stop_subqueue(bnad->netdev, txq_id);
958 printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
959 bnad->netdev->name, txq_id);
964 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
966 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
971 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
972 tcb = tx_info->tcb[i];
977 BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
978 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
979 BUG_ON(*(tcb->hw_consumer_index) != 0);
981 if (netif_carrier_ok(bnad->netdev)) {
982 printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
983 bnad->netdev->name, txq_id);
984 netif_wake_subqueue(bnad->netdev, txq_id);
985 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
990 * Workaround: the first ioceth enable can fail and report a zero
991 * MAC address, so try to fetch the MAC address again.
994 if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
995 bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
996 bnad_set_netdev_perm_addr(bnad);
1001 * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
1004 bnad_tx_cleanup(struct delayed_work *work)
1006 struct bnad_tx_info *tx_info =
1007 container_of(work, struct bnad_tx_info, tx_cleanup_work);
1008 struct bnad *bnad = NULL;
1009 struct bna_tcb *tcb;
1010 unsigned long flags;
1013 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1014 tcb = tx_info->tcb[i];
1020 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
1025 bnad_txq_cleanup(bnad, tcb);
1027 smp_mb__before_clear_bit();
1028 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
1032 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
1033 msecs_to_jiffies(1));
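/*
 * If a TXQ was still owned by a completion handler (FREE_SENT already set),
 * the work re-queues itself after 1 ms; bna_tx_cleanup_complete() is
 * reported to the Tx FSM only once every queue has been cleaned.
 */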
1037 spin_lock_irqsave(&bnad->bna_lock, flags);
1038 bna_tx_cleanup_complete(tx_info->tx);
1039 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1043 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
1045 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1046 struct bna_tcb *tcb;
1049 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1050 tcb = tx_info->tcb[i];
1055 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
1059 bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
1061 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1062 struct bna_ccb *ccb;
1063 struct bnad_rx_ctrl *rx_ctrl;
1066 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1067 rx_ctrl = &rx_info->rx_ctrl[i];
1072 clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);
1075 clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
1080 * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
1083 bnad_rx_cleanup(void *work)
1085 struct bnad_rx_info *rx_info =
1086 container_of(work, struct bnad_rx_info, rx_cleanup_work);
1087 struct bnad_rx_ctrl *rx_ctrl;
1088 struct bnad *bnad = NULL;
1089 unsigned long flags;
1092 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1093 rx_ctrl = &rx_info->rx_ctrl[i];
1098 bnad = rx_ctrl->ccb->bnad;
1101 * Wait till the poll handler has exited
1102 * and nothing can be scheduled anymore
1104 napi_disable(&rx_ctrl->napi);
1106 bnad_cq_cleanup(bnad, rx_ctrl->ccb);
1107 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
1108 if (rx_ctrl->ccb->rcb[1])
1109 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
1112 spin_lock_irqsave(&bnad->bna_lock, flags);
1113 bna_rx_cleanup_complete(rx_info->rx);
1114 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1118 bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
1120 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1121 struct bna_ccb *ccb;
1122 struct bnad_rx_ctrl *rx_ctrl;
1125 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1126 rx_ctrl = &rx_info->rx_ctrl[i];
1131 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
1134 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
1137 queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
1141 bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1143 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1144 struct bna_ccb *ccb;
1145 struct bna_rcb *rcb;
1146 struct bnad_rx_ctrl *rx_ctrl;
1149 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1150 rx_ctrl = &rx_info->rx_ctrl[i];
1155 napi_enable(&rx_ctrl->napi);
1157 for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
1162 bnad_rxq_alloc_init(bnad, rcb);
1163 set_bit(BNAD_RXQ_STARTED, &rcb->flags);
1164 set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
1165 bnad_rxq_post(bnad, rcb);
1171 bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
1173 struct bnad *bnad = (struct bnad *)arg;
1175 complete(&bnad->bnad_completions.rx_comp);
1179 bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
1181 bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
1182 complete(&bnad->bnad_completions.mcast_comp);
1186 bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
1187 struct bna_stats *stats)
1189 if (status == BNA_CB_SUCCESS)
1190 BNAD_UPDATE_CTR(bnad, hw_stats_updates);
1192 if (!netif_running(bnad->netdev) ||
1193 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1196 mod_timer(&bnad->stats_timer,
1197 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1201 bnad_cb_enet_mtu_set(struct bnad *bnad)
1203 bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
1204 complete(&bnad->bnad_completions.mtu_comp);
1208 bnad_cb_completion(void *arg, enum bfa_status status)
1210 struct bnad_iocmd_comp *iocmd_comp =
1211 (struct bnad_iocmd_comp *)arg;
1213 iocmd_comp->comp_status = (u32) status;
1214 complete(&iocmd_comp->comp);
1217 /* Resource allocation, free functions */
1220 bnad_mem_free(struct bnad *bnad,
1221 struct bna_mem_info *mem_info)
1226 if (mem_info->mdl == NULL)
1229 for (i = 0; i < mem_info->num; i++) {
1230 if (mem_info->mdl[i].kva != NULL) {
1231 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1232 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1234 dma_free_coherent(&bnad->pcidev->dev,
1235 mem_info->mdl[i].len,
1236 mem_info->mdl[i].kva, dma_pa);
1238 kfree(mem_info->mdl[i].kva);
1241 kfree(mem_info->mdl);
1242 mem_info->mdl = NULL;
1246 bnad_mem_alloc(struct bnad *bnad,
1247 struct bna_mem_info *mem_info)
1252 if ((mem_info->num == 0) || (mem_info->len == 0)) {
1253 mem_info->mdl = NULL;
1257 mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1259 if (mem_info->mdl == NULL)
1262 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1263 for (i = 0; i < mem_info->num; i++) {
1264 mem_info->mdl[i].len = mem_info->len;
1265 mem_info->mdl[i].kva =
1266 dma_alloc_coherent(&bnad->pcidev->dev,
1267 mem_info->len, &dma_pa,
1270 if (mem_info->mdl[i].kva == NULL)
1273 BNA_SET_DMA_ADDR(dma_pa,
1274 &(mem_info->mdl[i].dma));
1277 for (i = 0; i < mem_info->num; i++) {
1278 mem_info->mdl[i].len = mem_info->len;
1279 mem_info->mdl[i].kva = kzalloc(mem_info->len,
1281 if (mem_info->mdl[i].kva == NULL)
1289 bnad_mem_free(bnad, mem_info);
1293 /* Free IRQ for Mailbox */
1295 bnad_mbox_irq_free(struct bnad *bnad)
1298 unsigned long flags;
1300 spin_lock_irqsave(&bnad->bna_lock, flags);
1301 bnad_disable_mbox_irq(bnad);
1302 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1304 irq = BNAD_GET_MBOX_IRQ(bnad);
1305 free_irq(irq, bnad);
1309 * Allocates IRQ for Mailbox, but keeps it disabled
1310 * This will be enabled once we get the mbox enable callback
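/*
 * In MSI-X mode the dedicated mailbox vector is wired to
 * bnad_msix_mbox_handler(); in INTx mode the shared PCI line is served by
 * bnad_isr() with IRQF_SHARED.
 */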
1314 bnad_mbox_irq_alloc(struct bnad *bnad)
1317 unsigned long irq_flags, flags;
1319 irq_handler_t irq_handler;
1321 spin_lock_irqsave(&bnad->bna_lock, flags);
1322 if (bnad->cfg_flags & BNAD_CF_MSIX) {
1323 irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1324 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
1327 irq_handler = (irq_handler_t)bnad_isr;
1328 irq = bnad->pcidev->irq;
1329 irq_flags = IRQF_SHARED;
1332 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1333 sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1336 * Set the Mbox IRQ disable flag, so that the IRQ handler
1337 * called from request_irq() for SHARED IRQs does not execute
1339 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1341 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1343 err = request_irq(irq, irq_handler, irq_flags,
1344 bnad->mbox_irq_name, bnad);
1350 bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1352 kfree(intr_info->idl);
1353 intr_info->idl = NULL;
1356 /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1358 bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1359 u32 txrx_id, struct bna_intr_info *intr_info)
1361 int i, vector_start = 0;
1363 unsigned long flags;
1365 spin_lock_irqsave(&bnad->bna_lock, flags);
1366 cfg_flags = bnad->cfg_flags;
1367 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1369 if (cfg_flags & BNAD_CF_MSIX) {
1370 intr_info->intr_type = BNA_INTR_T_MSIX;
1371 intr_info->idl = kcalloc(intr_info->num,
1372 sizeof(struct bna_intr_descr),
1374 if (!intr_info->idl)
1379 vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
1383 vector_start = BNAD_MAILBOX_MSIX_VECTORS +
1384 (bnad->num_tx * bnad->num_txq_per_tx) +
1392 for (i = 0; i < intr_info->num; i++)
1393 intr_info->idl[i].vector = vector_start + i;
1395 intr_info->intr_type = BNA_INTR_T_INTX;
1397 intr_info->idl = kcalloc(intr_info->num,
1398 sizeof(struct bna_intr_descr),
1400 if (!intr_info->idl)
1405 intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
1409 intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
1416 /* NOTE: Should be called for MSIX only
1417 * Unregisters Tx MSIX vector(s) from the kernel
1420 bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1426 for (i = 0; i < num_txqs; i++) {
1427 if (tx_info->tcb[i] == NULL)
1430 vector_num = tx_info->tcb[i]->intr_vector;
1431 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1435 /* NOTE: Should be called for MSIX only
1436 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1439 bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1440 u32 tx_id, int num_txqs)
1446 for (i = 0; i < num_txqs; i++) {
1447 vector_num = tx_info->tcb[i]->intr_vector;
1448 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1449 tx_id + tx_info->tcb[i]->id);
1450 err = request_irq(bnad->msix_table[vector_num].vector,
1451 (irq_handler_t)bnad_msix_tx, 0,
1452 tx_info->tcb[i]->name,
1462 bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
1466 /* NOTE: Should be called for MSIX only
1467 * Unregisters Rx MSIX vector(s) from the kernel
1470 bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1476 for (i = 0; i < num_rxps; i++) {
1477 if (rx_info->rx_ctrl[i].ccb == NULL)
1480 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1481 free_irq(bnad->msix_table[vector_num].vector,
1482 rx_info->rx_ctrl[i].ccb);
1486 /* NOTE: Should be called for MSIX only
1487 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
1490 bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1491 u32 rx_id, int num_rxps)
1497 for (i = 0; i < num_rxps; i++) {
1498 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1499 sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1501 rx_id + rx_info->rx_ctrl[i].ccb->id);
1502 err = request_irq(bnad->msix_table[vector_num].vector,
1503 (irq_handler_t)bnad_msix_rx, 0,
1504 rx_info->rx_ctrl[i].ccb->name,
1505 rx_info->rx_ctrl[i].ccb);
1514 bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
1518 /* Free Tx object Resources */
1520 bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1524 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1525 if (res_info[i].res_type == BNA_RES_T_MEM)
1526 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1527 else if (res_info[i].res_type == BNA_RES_T_INTR)
1528 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1532 /* Allocates memory and interrupt resources for Tx object */
1534 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1539 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1540 if (res_info[i].res_type == BNA_RES_T_MEM)
1541 err = bnad_mem_alloc(bnad,
1542 &res_info[i].res_u.mem_info);
1543 else if (res_info[i].res_type == BNA_RES_T_INTR)
1544 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1545 &res_info[i].res_u.intr_info);
1552 bnad_tx_res_free(bnad, res_info);
1556 /* Free Rx object Resources */
1558 bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1562 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1563 if (res_info[i].res_type == BNA_RES_T_MEM)
1564 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1565 else if (res_info[i].res_type == BNA_RES_T_INTR)
1566 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1570 /* Allocates memory and interrupt resources for Rx object */
1572 bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1577 /* All memory needs to be allocated before setup_ccbs */
1578 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1579 if (res_info[i].res_type == BNA_RES_T_MEM)
1580 err = bnad_mem_alloc(bnad,
1581 &res_info[i].res_u.mem_info);
1582 else if (res_info[i].res_type == BNA_RES_T_INTR)
1583 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1584 &res_info[i].res_u.intr_info);
1591 bnad_rx_res_free(bnad, res_info);
1595 /* Timer callbacks */
1598 bnad_ioc_timeout(unsigned long data)
1600 struct bnad *bnad = (struct bnad *)data;
1601 unsigned long flags;
1603 spin_lock_irqsave(&bnad->bna_lock, flags);
1604 bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
1605 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1609 bnad_ioc_hb_check(unsigned long data)
1611 struct bnad *bnad = (struct bnad *)data;
1612 unsigned long flags;
1614 spin_lock_irqsave(&bnad->bna_lock, flags);
1615 bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
1616 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1620 bnad_iocpf_timeout(unsigned long data)
1622 struct bnad *bnad = (struct bnad *)data;
1623 unsigned long flags;
1625 spin_lock_irqsave(&bnad->bna_lock, flags);
1626 bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
1627 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1631 bnad_iocpf_sem_timeout(unsigned long data)
1633 struct bnad *bnad = (struct bnad *)data;
1634 unsigned long flags;
1636 spin_lock_irqsave(&bnad->bna_lock, flags);
1637 bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
1638 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1642 * All timer routines use bnad->bna_lock to protect against races
1643 * between the timer callbacks and the paths that stop or re-arm them.
1651 /* b) Dynamic Interrupt Moderation Timer */
1653 bnad_dim_timeout(unsigned long data)
1655 struct bnad *bnad = (struct bnad *)data;
1656 struct bnad_rx_info *rx_info;
1657 struct bnad_rx_ctrl *rx_ctrl;
1659 unsigned long flags;
1661 if (!netif_carrier_ok(bnad->netdev))
1664 spin_lock_irqsave(&bnad->bna_lock, flags);
1665 for (i = 0; i < bnad->num_rx; i++) {
1666 rx_info = &bnad->rx_info[i];
1669 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1670 rx_ctrl = &rx_info->rx_ctrl[j];
1673 bna_rx_dim_update(rx_ctrl->ccb);
1677 /* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
1678 if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1679 mod_timer(&bnad->dim_timer,
1680 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1681 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1684 /* c) Statistics Timer */
1686 bnad_stats_timeout(unsigned long data)
1688 struct bnad *bnad = (struct bnad *)data;
1689 unsigned long flags;
1691 if (!netif_running(bnad->netdev) ||
1692 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1695 spin_lock_irqsave(&bnad->bna_lock, flags);
1696 bna_hw_stats_get(&bnad->bna);
1697 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1701 * Set up timer for DIM
1702 * Called with bnad->bna_lock held
1705 bnad_dim_timer_start(struct bnad *bnad)
1707 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1708 !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1709 setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1710 (unsigned long)bnad);
1711 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1712 mod_timer(&bnad->dim_timer,
1713 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1718 * Set up timer for statistics
1719 * Called with mutex_lock(&bnad->conf_mutex) held
1722 bnad_stats_timer_start(struct bnad *bnad)
1724 unsigned long flags;
1726 spin_lock_irqsave(&bnad->bna_lock, flags);
1727 if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1728 setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1729 (unsigned long)bnad);
1730 mod_timer(&bnad->stats_timer,
1731 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1733 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1737 * Stops the stats timer
1738 * Called with mutex_lock(&bnad->conf_mutex) held
1741 bnad_stats_timer_stop(struct bnad *bnad)
1744 unsigned long flags;
1746 spin_lock_irqsave(&bnad->bna_lock, flags);
1747 if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1749 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1751 del_timer_sync(&bnad->stats_timer);
1757 bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1759 int i = 1; /* Index 0 has broadcast address */
1760 struct netdev_hw_addr *mc_addr;
1762 netdev_for_each_mc_addr(mc_addr, netdev) {
1763 memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
1770 bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1772 struct bnad_rx_ctrl *rx_ctrl =
1773 container_of(napi, struct bnad_rx_ctrl, napi);
1774 struct bnad *bnad = rx_ctrl->bnad;
1777 rx_ctrl->rx_poll_ctr++;
1779 if (!netif_carrier_ok(bnad->netdev))
1782 rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
1787 napi_complete(napi);
1789 rx_ctrl->rx_complete++;
1792 bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
1797 #define BNAD_NAPI_POLL_QUOTA 64
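/*
 * NAPI weight handed to netif_napi_add(): bnad_cq_process() handles at most
 * this many completions per poll before yielding.
 */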
1799 bnad_napi_add(struct bnad *bnad, u32 rx_id)
1801 struct bnad_rx_ctrl *rx_ctrl;
1804 /* Initialize & enable NAPI */
1805 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1806 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1807 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1808 bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
1813 bnad_napi_delete(struct bnad *bnad, u32 rx_id)
1817 /* First disable and then clean up */
1818 for (i = 0; i < bnad->num_rxp_per_rx; i++)
1819 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1822 /* Should be called with conf_lock held */
1824 bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
1826 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1827 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1828 unsigned long flags;
1833 init_completion(&bnad->bnad_completions.tx_comp);
1834 spin_lock_irqsave(&bnad->bna_lock, flags);
1835 bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1836 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1837 wait_for_completion(&bnad->bnad_completions.tx_comp);
1839 if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1840 bnad_tx_msix_unregister(bnad, tx_info,
1841 bnad->num_txq_per_tx);
1843 spin_lock_irqsave(&bnad->bna_lock, flags);
1844 bna_tx_destroy(tx_info->tx);
1845 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1850 bnad_tx_res_free(bnad, res_info);
1853 /* Should be called with conf_lock held */
1855 bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1858 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1859 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1860 struct bna_intr_info *intr_info =
1861 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1862 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1863 static const struct bna_tx_event_cbfn tx_cbfn = {
1864 .tcb_setup_cbfn = bnad_cb_tcb_setup,
1865 .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
1866 .tx_stall_cbfn = bnad_cb_tx_stall,
1867 .tx_resume_cbfn = bnad_cb_tx_resume,
1868 .tx_cleanup_cbfn = bnad_cb_tx_cleanup,
1872 unsigned long flags;
1874 tx_info->tx_id = tx_id;
1876 /* Initialize the Tx object configuration */
1877 tx_config->num_txq = bnad->num_txq_per_tx;
1878 tx_config->txq_depth = bnad->txq_depth;
1879 tx_config->tx_type = BNA_TX_T_REGULAR;
1880 tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
1882 /* Get BNA's resource requirement for one tx object */
1883 spin_lock_irqsave(&bnad->bna_lock, flags);
1884 bna_tx_res_req(bnad->num_txq_per_tx,
1885 bnad->txq_depth, res_info);
1886 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1888 /* Fill Unmap Q memory requirements */
1889 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1890 bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) *
1893 /* Allocate resources */
1894 err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1898 /* Ask BNA to create one Tx object, supplying required resources */
1899 spin_lock_irqsave(&bnad->bna_lock, flags);
1900 tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1902 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1907 INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
1908 (work_func_t)bnad_tx_cleanup);
1910 /* Register ISR for the Tx object */
1911 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1912 err = bnad_tx_msix_register(bnad, tx_info,
1913 tx_id, bnad->num_txq_per_tx);
1918 spin_lock_irqsave(&bnad->bna_lock, flags);
1920 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1925 bnad_tx_res_free(bnad, res_info);
1929 /* Setup the rx config for bna_rx_create */
1930 /* bnad decides the configuration */
1932 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
1934 rx_config->rx_type = BNA_RX_T_REGULAR;
1935 rx_config->num_paths = bnad->num_rxp_per_rx;
1936 rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
1938 if (bnad->num_rxp_per_rx > 1) {
1939 rx_config->rss_status = BNA_STATUS_T_ENABLED;
1940 rx_config->rss_config.hash_type =
1941 (BFI_ENET_RSS_IPV6 |
1942 BFI_ENET_RSS_IPV6_TCP |
1944 BFI_ENET_RSS_IPV4_TCP);
1945 rx_config->rss_config.hash_mask =
1946 bnad->num_rxp_per_rx - 1;
1947 get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
1948 sizeof(rx_config->rss_config.toeplitz_hash_key));
1950 rx_config->rss_status = BNA_STATUS_T_DISABLED;
1951 memset(&rx_config->rss_config, 0,
1952 sizeof(rx_config->rss_config));
1954 rx_config->rxp_type = BNA_RXP_SLR;
1955 rx_config->q_depth = bnad->rxq_depth;
1957 rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
1959 rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
1963 bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
1965 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1968 for (i = 0; i < bnad->num_rxp_per_rx; i++)
1969 rx_info->rx_ctrl[i].bnad = bnad;
1972 /* Called with mutex_lock(&bnad->conf_mutex) held */
1974 bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
1976 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1977 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1978 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1979 unsigned long flags;
1986 spin_lock_irqsave(&bnad->bna_lock, flags);
1987 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1988 test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1989 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1992 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1994 del_timer_sync(&bnad->dim_timer);
1997 init_completion(&bnad->bnad_completions.rx_comp);
1998 spin_lock_irqsave(&bnad->bna_lock, flags);
1999 bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
2000 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2001 wait_for_completion(&bnad->bnad_completions.rx_comp);
2003 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
2004 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
2006 bnad_napi_delete(bnad, rx_id);
2008 spin_lock_irqsave(&bnad->bna_lock, flags);
2009 bna_rx_destroy(rx_info->rx);
2013 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2015 bnad_rx_res_free(bnad, res_info);
2018 /* Called with mutex_lock(&bnad->conf_mutex) held */
2020 bnad_setup_rx(struct bnad *bnad, u32 rx_id)
2023 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2024 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2025 struct bna_intr_info *intr_info =
2026 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2027 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2028 static const struct bna_rx_event_cbfn rx_cbfn = {
2029 .rcb_setup_cbfn = NULL,
2030 .rcb_destroy_cbfn = NULL,
2031 .ccb_setup_cbfn = bnad_cb_ccb_setup,
2032 .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
2033 .rx_stall_cbfn = bnad_cb_rx_stall,
2034 .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
2035 .rx_post_cbfn = bnad_cb_rx_post,
2038 unsigned long flags;
2040 rx_info->rx_id = rx_id;
2042 /* Initialize the Rx object configuration */
2043 bnad_init_rx_config(bnad, rx_config);
2045 /* Get BNA's resource requirement for one Rx object */
2046 spin_lock_irqsave(&bnad->bna_lock, flags);
2047 bna_rx_res_req(rx_config, res_info);
2048 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2050 /* Fill Unmap Q memory requirements */
2051 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPQ],
2052 rx_config->num_paths +
2053 ((rx_config->rxp_type == BNA_RXP_SINGLE) ?
2054 0 : rx_config->num_paths),
2055 ((bnad->rxq_depth * sizeof(struct bnad_rx_unmap)) +
2056 sizeof(struct bnad_rx_unmap_q)));
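/*
 * One unmap queue per RxQ: num_paths queues, doubled when the RXP type is
 * not SINGLE because each path then owns two RxQs; every queue needs
 * rxq_depth bnad_rx_unmap entries plus the bnad_rx_unmap_q header.
 */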
2058 /* Allocate resource */
2059 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
2063 bnad_rx_ctrl_init(bnad, rx_id);
2065 /* Ask BNA to create one Rx object, supplying required resources */
2066 spin_lock_irqsave(&bnad->bna_lock, flags);
2067 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
2071 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2075 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2077 INIT_WORK(&rx_info->rx_cleanup_work,
2078 (work_func_t)(bnad_rx_cleanup));
2081 * Init NAPI, so that state is set to NAPI_STATE_SCHED,
2082 * so that IRQ handler cannot schedule NAPI at this point.
2084 bnad_napi_add(bnad, rx_id);
2086 /* Register ISR for the Rx object */
2087 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2088 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
2089 rx_config->num_paths);
2094 spin_lock_irqsave(&bnad->bna_lock, flags);
2096 /* Set up Dynamic Interrupt Moderation Vector */
2097 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
2098 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
2100 /* Enable VLAN filtering only on the default Rx */
2101 bna_rx_vlanfilter_enable(rx);
2103 /* Start the DIM timer */
2104 bnad_dim_timer_start(bnad);
2108 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2113 bnad_destroy_rx(bnad, rx_id);
2117 /* Called with conf_lock & bnad->bna_lock held */
2119 bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2121 struct bnad_tx_info *tx_info;
2123 tx_info = &bnad->tx_info[0];
2127 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2130 /* Called with conf_lock & bnad->bna_lock held */
2132 bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2134 struct bnad_rx_info *rx_info;
2137 for (i = 0; i < bnad->num_rx; i++) {
2138 rx_info = &bnad->rx_info[i];
2141 bna_rx_coalescing_timeo_set(rx_info->rx,
2142 bnad->rx_coalescing_timeo);
2147 * Called with bnad->bna_lock held
2150 bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
2154 if (!is_valid_ether_addr(mac_addr))
2155 return -EADDRNOTAVAIL;
2157 /* If datapath is down, pretend everything went through */
2158 if (!bnad->rx_info[0].rx)
2161 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
2162 if (ret != BNA_CB_SUCCESS)
2163 return -EADDRNOTAVAIL;
2168 /* Should be called with conf_lock held */
2170 bnad_enable_default_bcast(struct bnad *bnad)
2172 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2174 unsigned long flags;
2176 init_completion(&bnad->bnad_completions.mcast_comp);
2178 spin_lock_irqsave(&bnad->bna_lock, flags);
2179 ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
2180 bnad_cb_rx_mcast_add);
2181 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2183 if (ret == BNA_CB_SUCCESS)
2184 wait_for_completion(&bnad->bnad_completions.mcast_comp);
2188 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2194 /* Called with mutex_lock(&bnad->conf_mutex) held */
2196 bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2199 unsigned long flags;
2201 for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
2202 spin_lock_irqsave(&bnad->bna_lock, flags);
2203 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
2204 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2208 /* Statistics utilities */
2210 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2214 for (i = 0; i < bnad->num_rx; i++) {
2215 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2216 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
2217 stats->rx_packets += bnad->rx_info[i].
2218 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
2219 stats->rx_bytes += bnad->rx_info[i].
2220 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2221 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2222 bnad->rx_info[i].rx_ctrl[j].ccb->
2224 stats->rx_packets +=
2225 bnad->rx_info[i].rx_ctrl[j].
2226 ccb->rcb[1]->rxq->rx_packets;
2228 bnad->rx_info[i].rx_ctrl[j].
2229 ccb->rcb[1]->rxq->rx_bytes;
2234 for (i = 0; i < bnad->num_tx; i++) {
2235 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2236 if (bnad->tx_info[i].tcb[j]) {
2237 stats->tx_packets +=
2238 bnad->tx_info[i].tcb[j]->txq->tx_packets;
2240 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2247 * Must be called with the bna_lock held.
2250 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2252 struct bfi_enet_stats_mac *mac_stats;
2256 mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
2258 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2259 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2260 mac_stats->rx_undersize;
2261 stats->tx_errors = mac_stats->tx_fcs_error +
2262 mac_stats->tx_undersize;
2263 stats->rx_dropped = mac_stats->rx_drop;
2264 stats->tx_dropped = mac_stats->tx_drop;
2265 stats->multicast = mac_stats->rx_multicast;
2266 stats->collisions = mac_stats->tx_total_collision;
2268 stats->rx_length_errors = mac_stats->rx_frame_length_error;
2270 /* receive ring buffer overflow ?? */
2272 stats->rx_crc_errors = mac_stats->rx_fcs_error;
2273 stats->rx_frame_errors = mac_stats->rx_alignment_error;
2274 /* receiver FIFO overrun */
2275 bmap = bna_rx_rid_mask(&bnad->bna);
2276 for (i = 0; bmap; i++) {
2278 stats->rx_fifo_errors +=
2279 bnad->stats.bna_stats->
2280 hw_stats.rxf_stats[i].frame_drops;
2288 bnad_mbox_irq_sync(struct bnad *bnad)
2291 unsigned long flags;
2293 spin_lock_irqsave(&bnad->bna_lock, flags);
2294 if (bnad->cfg_flags & BNAD_CF_MSIX)
2295 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
2297 irq = bnad->pcidev->irq;
2298 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2300 synchronize_irq(irq);
2303 /* Utility used by bnad_start_xmit, for doing TSO */
2305 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2309 if (skb_header_cloned(skb)) {
2310 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2312 BNAD_UPDATE_CTR(bnad, tso_err);
2318 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2319 * excluding the length field.
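/*
 * The length is omitted from the seed because every emitted segment has a
 * different length; the hardware folds the per-segment length in when it
 * finalizes each segment's checksum.
 */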
2321 if (skb->protocol == htons(ETH_P_IP)) {
2322 struct iphdr *iph = ip_hdr(skb);
2324 /* Do we really need these? */
2328 tcp_hdr(skb)->check =
2329 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2331 BNAD_UPDATE_CTR(bnad, tso4);
2333 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2335 ipv6h->payload_len = 0;
2336 tcp_hdr(skb)->check =
2337 ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2339 BNAD_UPDATE_CTR(bnad, tso6);
2346 * Initialize Q numbers depending on Rx Paths
2347 * Called with bnad->bna_lock held, because of cfg_flags
2351 bnad_q_num_init(struct bnad *bnad)
2355 rxps = min((uint)num_online_cpus(),
2356 (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
2358 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2359 rxps = 1; /* INTx */
2363 bnad->num_rxp_per_rx = rxps;
2364 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2368 * Adjusts the Q numbers, given a number of msix vectors
2369 * Give preference to RSS as opposed to Tx priority Queues,
2370 * in such a case, just use 1 Tx Q
2371 * Called with bnad->bna_lock held because of cfg_flags access
2374 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
2376 bnad->num_txq_per_tx = 1;
2377 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2378 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2379 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2380 bnad->num_rxp_per_rx = msix_vectors -
2381 (bnad->num_tx * bnad->num_txq_per_tx) -
2382 BNAD_MAILBOX_MSIX_VECTORS;
2384 bnad->num_rxp_per_rx = 1;
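/*
 * Example (illustrative, assuming BNAD_MAILBOX_MSIX_VECTORS == 1): with 8
 * granted vectors and a single TxQ, 8 - 1 - 1 = 6 Rx paths remain; when the
 * vector budget is too small the driver falls back to one Rx path.
 */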
2387 /* Enable / disable ioceth */
2389 bnad_ioceth_disable(struct bnad *bnad)
2391 unsigned long flags;
2394 spin_lock_irqsave(&bnad->bna_lock, flags);
2395 init_completion(&bnad->bnad_completions.ioc_comp);
2396 bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
2397 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2399 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2400 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2402 err = bnad->bnad_completions.ioc_comp_status;
2407 bnad_ioceth_enable(struct bnad *bnad)
2410 unsigned long flags;
2412 spin_lock_irqsave(&bnad->bna_lock, flags);
2413 init_completion(&bnad->bnad_completions.ioc_comp);
2414 bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2415 bna_ioceth_enable(&bnad->bna.ioceth);
2416 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2418 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2419 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2421 err = bnad->bnad_completions.ioc_comp_status;
2426 /* Free BNA resources */
2428 bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2433 for (i = 0; i < res_val_max; i++)
2434 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2437 /* Allocates memory and interrupt resources for BNA */
2439 bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2444 for (i = 0; i < res_val_max; i++) {
2445 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2452 bnad_res_free(bnad, res_info, res_val_max);
2456 /* Interrupt enable / disable */
2458 bnad_enable_msix(struct bnad *bnad)
2461 unsigned long flags;
2463 spin_lock_irqsave(&bnad->bna_lock, flags);
2464 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2465 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2468 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2470 if (bnad->msix_table)
2474 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2476 if (!bnad->msix_table)
2479 for (i = 0; i < bnad->msix_num; i++)
2480 bnad->msix_table[i].entry = i;
2482 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
2484 /* Not enough MSI-X vectors. */
2485 pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
2486 ret, bnad->msix_num);
2488 spin_lock_irqsave(&bnad->bna_lock, flags);
2489 /* ret = #of vectors that we got */
2490 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2491 (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
2492 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2494 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
2495 BNAD_MAILBOX_MSIX_VECTORS;
2497 if (bnad->msix_num > ret)
2500 /* Try once more with adjusted numbers */
2501 /* If this fails, fall back to INTx */
2502 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
2510 pci_intx(bnad->pcidev, 0);
2515 pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");
2517 kfree(bnad->msix_table);
2518 bnad->msix_table = NULL;
2520 spin_lock_irqsave(&bnad->bna_lock, flags);
2521 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2522 bnad_q_num_init(bnad);
2523 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2527 bnad_disable_msix(struct bnad *bnad)
2530 unsigned long flags;
2532 spin_lock_irqsave(&bnad->bna_lock, flags);
2533 cfg_flags = bnad->cfg_flags;
2534 if (bnad->cfg_flags & BNAD_CF_MSIX)
2535 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2536 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2538 if (cfg_flags & BNAD_CF_MSIX) {
2539 pci_disable_msix(bnad->pcidev);
2540 kfree(bnad->msix_table);
2541 bnad->msix_table = NULL;
2545 /* Netdev entry points */
2547 bnad_open(struct net_device *netdev)
2550 struct bnad *bnad = netdev_priv(netdev);
2551 struct bna_pause_config pause_config;
2553 unsigned long flags;
2555 mutex_lock(&bnad->conf_mutex);
2558 err = bnad_setup_tx(bnad, 0);
2563 err = bnad_setup_rx(bnad, 0);
2568 pause_config.tx_pause = 0;
2569 pause_config.rx_pause = 0;
2571 mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
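/*
 * The frame size handed to bna_enet_mtu_set() covers the Ethernet header,
 * one VLAN tag and the FCS on top of the L3 MTU.
 */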
2573 spin_lock_irqsave(&bnad->bna_lock, flags);
2574 bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL);
2575 bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
2576 bna_enet_enable(&bnad->bna.enet);
2577 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2579 /* Enable broadcast */
2580 bnad_enable_default_bcast(bnad);
2582 /* Restore VLANs, if any */
2583 bnad_restore_vlans(bnad, 0);
2585 /* Set the UCAST address */
2586 spin_lock_irqsave(&bnad->bna_lock, flags);
2587 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2588 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2590 /* Start the stats timer */
2591 bnad_stats_timer_start(bnad);
2593 mutex_unlock(&bnad->conf_mutex);
2598 bnad_destroy_tx(bnad, 0);
2601 mutex_unlock(&bnad->conf_mutex);
2606 bnad_stop(struct net_device *netdev)
2608 struct bnad *bnad = netdev_priv(netdev);
2609 unsigned long flags;
2611 mutex_lock(&bnad->conf_mutex);
2613 /* Stop the stats timer */
2614 bnad_stats_timer_stop(bnad);
2616 init_completion(&bnad->bnad_completions.enet_comp);
2618 spin_lock_irqsave(&bnad->bna_lock, flags);
2619 bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2620 bnad_cb_enet_disabled);
2621 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2623 wait_for_completion(&bnad->bnad_completions.enet_comp);
2625 bnad_destroy_tx(bnad, 0);
2626 bnad_destroy_rx(bnad, 0);
2628 /* Synchronize mailbox IRQ */
2629 bnad_mbox_irq_sync(bnad);
2631 mutex_unlock(&bnad->conf_mutex);
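/*
 * Fill in the header portion of a Tx work item: VLAN tag insertion
 * (honoring the CEE priority when CEE is running), LSO vs. plain send
 * opcode selection, and TCP/UDP checksum offload offsets for
 * CHECKSUM_PARTIAL skbs.
 */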
2637 /* Returns 0 for success */
2639 bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2640 struct sk_buff *skb, struct bna_txq_entry *txqent)
2646 if (vlan_tx_tag_present(skb)) {
2647 vlan_tag = (u16)vlan_tx_tag_get(skb);
2648 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2650 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2651 vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT)
2652 | (vlan_tag & 0x1fff);
2653 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2655 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2657 if (skb_is_gso(skb)) {
2658 gso_size = skb_shinfo(skb)->gso_size;
2659 if (unlikely(gso_size > bnad->netdev->mtu)) {
2660 BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2663 if (unlikely((gso_size + skb_transport_offset(skb) +
2664 tcp_hdrlen(skb)) >= skb->len)) {
2665 txqent->hdr.wi.opcode =
2666 __constant_htons(BNA_TXQ_WI_SEND);
2667 txqent->hdr.wi.lso_mss = 0;
2668 BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2670 txqent->hdr.wi.opcode =
2671 __constant_htons(BNA_TXQ_WI_SEND_LSO);
2672 txqent->hdr.wi.lso_mss = htons(gso_size);
2675 if (bnad_tso_prepare(bnad, skb)) {
2676 BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2680 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2681 txqent->hdr.wi.l4_hdr_size_n_offset =
2682 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
2683 tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
2685 txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND);
2686 txqent->hdr.wi.lso_mss = 0;
2688 if (unlikely(skb->len > (bnad->netdev->mtu + ETH_HLEN))) {
2689 BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2693 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2696 if (skb->protocol == __constant_htons(ETH_P_IP))
2697 proto = ip_hdr(skb)->protocol;
2698 #ifdef NETIF_F_IPV6_CSUM
2699 else if (skb->protocol ==
2700 __constant_htons(ETH_P_IPV6)) {
2701 /* nexthdr may not be TCP immediately. */
2702 proto = ipv6_hdr(skb)->nexthdr;
2705 if (proto == IPPROTO_TCP) {
2706 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2707 txqent->hdr.wi.l4_hdr_size_n_offset =
2708 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2709 (0, skb_transport_offset(skb)));
2711 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2713 if (unlikely(skb_headlen(skb) <
2714 skb_transport_offset(skb) +
2716 BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2719 } else if (proto == IPPROTO_UDP) {
2720 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2721 txqent->hdr.wi.l4_hdr_size_n_offset =
2722 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2723 (0, skb_transport_offset(skb)));
2725 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2726 if (unlikely(skb_headlen(skb) <
2727 skb_transport_offset(skb) +
2728 sizeof(struct udphdr))) {
2729 BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2734 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
2738 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2741 txqent->hdr.wi.flags = htons(flags);
2742 txqent->hdr.wi.frame_length = htonl(skb->len);
2748 * bnad_start_xmit : Netdev entry point for Transmit
2749 * Called under lock held by net_device
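 * Validates the skb, reserves work items on the default TxQ, maps the
 * linear data and each page fragment for DMA, and rings the TxQ doorbell.
 * Returns NETDEV_TX_BUSY only when the queue must be stopped for lack of
 * space.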
2752 bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2754 struct bnad *bnad = netdev_priv(netdev);
2756 struct bna_tcb *tcb = NULL;
2757 struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap;
2758 u32 prod, q_depth, vect_id;
2759 u32 wis, vectors, len;
2761 dma_addr_t dma_addr;
2762 struct bna_txq_entry *txqent;
2764 len = skb_headlen(skb);
2766 /* Sanity checks for the skb */
2768 if (unlikely(skb->len <= ETH_HLEN)) {
2770 BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2771 return NETDEV_TX_OK;
2773 if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
2775 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2776 return NETDEV_TX_OK;
2778 if (unlikely(len == 0)) {
2780 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2781 return NETDEV_TX_OK;
2784 tcb = bnad->tx_info[0].tcb[txq_id];
2785 q_depth = tcb->q_depth;
2786 prod = tcb->producer_index;
2788 unmap_q = tcb->unmap_q;
2791 * Takes care of the Tx that is scheduled between clearing the flag
2792 * and the netif_tx_stop_all_queues() call.
2794 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2796 BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2797 return NETDEV_TX_OK;
2800 vectors = 1 + skb_shinfo(skb)->nr_frags;
2801 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
2803 if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
2805 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2806 return NETDEV_TX_OK;
2809 /* Check for available TxQ resources */
2810 if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2811 if ((*tcb->hw_consumer_index != tcb->consumer_index) &&
2812 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2814 sent = bnad_txcmpl_process(bnad, tcb);
2815 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2816 bna_ib_ack(tcb->i_dbell, sent);
2817 smp_mb__before_clear_bit();
2818 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2820 netif_stop_queue(netdev);
2821 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2826 * Check again to deal with race condition between
2827 * netif_stop_queue here, and netif_wake_queue in
2828 * interrupt handler which is not inside netif tx lock.
2830 if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2831 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2832 return NETDEV_TX_BUSY;
2834 netif_wake_queue(netdev);
2835 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2839 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
2840 head_unmap = &unmap_q[prod];
2842 /* Program the opcode, flags, frame_len, num_vectors in WI */
2843 if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
2845 return NETDEV_TX_OK;
2847 txqent->hdr.wi.reserved = 0;
2848 txqent->hdr.wi.num_vectors = vectors;
2850 head_unmap->skb = skb;
2851 head_unmap->nvecs = 0;
2853 /* Program the vectors */
2855 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
2856 len, DMA_TO_DEVICE);
2857 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
2858 txqent->vector[0].length = htons(len);
2859 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
2860 head_unmap->nvecs++;
2862 for (i = 0, vect_id = 0; i < vectors - 1; i++) {
2863 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
2864 u16 size = skb_frag_size(frag);
2866 if (unlikely(size == 0)) {
2867 /* Undo the changes starting at tcb->producer_index */
2868 bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
2869 tcb->producer_index);
2871 BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
2872 return NETDEV_TX_OK;
2878 if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
2880 BNA_QE_INDX_INC(prod, q_depth);
2881 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
2882 txqent->hdr.wi_ext.opcode =
2883 __constant_htons(BNA_TXQ_WI_EXTENSION);
2884 unmap = &unmap_q[prod];
2887 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
2888 0, size, DMA_TO_DEVICE);
2889 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2890 txqent->vector[vect_id].length = htons(size);
2891 dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
2893 head_unmap->nvecs++;
2896 if (unlikely(len != skb->len)) {
2897 /* Undo the changes starting at tcb->producer_index */
2898 bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
2900 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
2901 return NETDEV_TX_OK;
2904 BNA_QE_INDX_INC(prod, q_depth);
2905 tcb->producer_index = prod;
2909 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2910 return NETDEV_TX_OK;
2912 bna_txq_prod_indx_doorbell(tcb);
2915 return NETDEV_TX_OK;
2919 * Uses spin_lock to synchronize reading of the stats structures, which
2920 * are written by BNA under the same lock.
2922 static struct rtnl_link_stats64 *
2923 bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
2925 struct bnad *bnad = netdev_priv(netdev);
2926 unsigned long flags;
2928 spin_lock_irqsave(&bnad->bna_lock, flags);
2930 bnad_netdev_qstats_fill(bnad, stats);
2931 bnad_netdev_hwstats_fill(bnad, stats);
2933 spin_unlock_irqrestore(&bnad->bna_lock, flags);
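/*
 * ndo_set_rx_mode: diffs the requested promiscuous/all-multicast state
 * against the cached cfg_flags, programs the resulting Rx mode mask, and
 * reloads the multicast list with the broadcast address at index 0.
 */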
2939 bnad_set_rx_mode(struct net_device *netdev)
2941 struct bnad *bnad = netdev_priv(netdev);
2942 u32 new_mask, valid_mask;
2943 unsigned long flags;
2945 spin_lock_irqsave(&bnad->bna_lock, flags);
2947 new_mask = valid_mask = 0;
2949 if (netdev->flags & IFF_PROMISC) {
2950 if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
2951 new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2952 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2953 bnad->cfg_flags |= BNAD_CF_PROMISC;
2956 if (bnad->cfg_flags & BNAD_CF_PROMISC) {
2957 new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
2958 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2959 bnad->cfg_flags &= ~BNAD_CF_PROMISC;
2963 if (netdev->flags & IFF_ALLMULTI) {
2964 if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
2965 new_mask |= BNA_RXMODE_ALLMULTI;
2966 valid_mask |= BNA_RXMODE_ALLMULTI;
2967 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
2970 if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
2971 new_mask &= ~BNA_RXMODE_ALLMULTI;
2972 valid_mask |= BNA_RXMODE_ALLMULTI;
2973 bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
2977 if (bnad->rx_info[0].rx == NULL)
2980 bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
2982 if (!netdev_mc_empty(netdev)) {
2984 int mc_count = netdev_mc_count(netdev);
2986 /* Index 0 holds the broadcast address */
2988 kzalloc((mc_count + 1) * ETH_ALEN,
2993 memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
2995 /* Copy rest of the MC addresses */
2996 bnad_netdev_mc_list_get(netdev, mcaddr_list);
2998 bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
3001 /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
3005 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3009 * bna_lock is used to sync writes to netdev->addr
3010 * conf_lock cannot be used since this call may be made
3011 * in a non-blocking context.
3014 bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
3017 struct bnad *bnad = netdev_priv(netdev);
3018 struct sockaddr *sa = (struct sockaddr *)mac_addr;
3019 unsigned long flags;
3021 spin_lock_irqsave(&bnad->bna_lock, flags);
3023 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
3026 memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
3028 spin_unlock_irqrestore(&bnad->bna_lock, flags);
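/*
 * Program the new frame size into the enet block and block on a
 * completion until the firmware acknowledges the MTU change.
 */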
3034 bnad_mtu_set(struct bnad *bnad, int mtu)
3036 unsigned long flags;
3038 init_completion(&bnad->bnad_completions.mtu_comp);
3040 spin_lock_irqsave(&bnad->bna_lock, flags);
3041 bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set);
3042 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3044 wait_for_completion(&bnad->bnad_completions.mtu_comp);
3046 return bnad->bnad_completions.mtu_comp_status;
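/*
 * ndo_change_mtu: validates the requested MTU against BNAD_JUMBO_MTU,
 * then programs the hardware with the full frame size
 * (ETH_HLEN + VLAN_HLEN + MTU + ETH_FCS_LEN) under conf_mutex.
 */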
3050 bnad_change_mtu(struct net_device *netdev, int new_mtu)
3052 int err, mtu = netdev->mtu;
3053 struct bnad *bnad = netdev_priv(netdev);
3055 if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
3058 mutex_lock(&bnad->conf_mutex);
3060 netdev->mtu = new_mtu;
3062 mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
3063 err = bnad_mtu_set(bnad, mtu);
3067 mutex_unlock(&bnad->conf_mutex);
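/*
 * VLAN filter updates take conf_mutex to serialize against open/stop and
 * bna_lock to protect the hardware VLAN table and the active_vlans bitmap.
 */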
3072 bnad_vlan_rx_add_vid(struct net_device *netdev,
3075 struct bnad *bnad = netdev_priv(netdev);
3076 unsigned long flags;
3078 if (!bnad->rx_info[0].rx)
3081 mutex_lock(&bnad->conf_mutex);
3083 spin_lock_irqsave(&bnad->bna_lock, flags);
3084 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
3085 set_bit(vid, bnad->active_vlans);
3086 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3088 mutex_unlock(&bnad->conf_mutex);
3094 bnad_vlan_rx_kill_vid(struct net_device *netdev,
3097 struct bnad *bnad = netdev_priv(netdev);
3098 unsigned long flags;
3100 if (!bnad->rx_info[0].rx)
3103 mutex_lock(&bnad->conf_mutex);
3105 spin_lock_irqsave(&bnad->bna_lock, flags);
3106 clear_bit(vid, bnad->active_vlans);
3107 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3108 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3110 mutex_unlock(&bnad->conf_mutex);
3115 #ifdef CONFIG_NET_POLL_CONTROLLER
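/*
 * Poll controller: in INTx mode the shared ISR is invoked directly with
 * interrupts masked; in MSI-X mode NAPI polling is scheduled on each Rx
 * path instead.
 */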
3117 bnad_netpoll(struct net_device *netdev)
3119 struct bnad *bnad = netdev_priv(netdev);
3120 struct bnad_rx_info *rx_info;
3121 struct bnad_rx_ctrl *rx_ctrl;
3125 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3126 bna_intx_disable(&bnad->bna, curr_mask);
3127 bnad_isr(bnad->pcidev->irq, netdev);
3128 bna_intx_enable(&bnad->bna, curr_mask);
3131 * Tx processing may happen in sending context, so no need
3132 * to explicitly process completions here
3136 for (i = 0; i < bnad->num_rx; i++) {
3137 rx_info = &bnad->rx_info[i];
3140 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3141 rx_ctrl = &rx_info->rx_ctrl[j];
3143 bnad_netif_rx_schedule_poll(bnad,
3151 static const struct net_device_ops bnad_netdev_ops = {
3152 .ndo_open = bnad_open,
3153 .ndo_stop = bnad_stop,
3154 .ndo_start_xmit = bnad_start_xmit,
3155 .ndo_get_stats64 = bnad_get_stats64,
3156 .ndo_set_rx_mode = bnad_set_rx_mode,
3157 .ndo_validate_addr = eth_validate_addr,
3158 .ndo_set_mac_address = bnad_set_mac_address,
3159 .ndo_change_mtu = bnad_change_mtu,
3160 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
3161 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
3162 #ifdef CONFIG_NET_POLL_CONTROLLER
3163 .ndo_poll_controller = bnad_netpoll
3168 bnad_netdev_init(struct bnad *bnad, bool using_dac)
3170 struct net_device *netdev = bnad->netdev;
3172 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3173 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3174 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX;
3176 netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3177 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3178 NETIF_F_TSO | NETIF_F_TSO6;
3180 netdev->features |= netdev->hw_features |
3181 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3184 netdev->features |= NETIF_F_HIGHDMA;
3186 netdev->mem_start = bnad->mmio_start;
3187 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3189 netdev->netdev_ops = &bnad_netdev_ops;
3190 bnad_set_ethtool_ops(netdev);
3194 * 1. Initialize the bnad structure
3195 * 2. Setup netdev pointer in pci_dev
3196 * 3. Initialize no. of TxQ & CQs & MSIX vectors
3197 * 4. Initialize work queue.
3200 bnad_init(struct bnad *bnad,
3201 struct pci_dev *pdev, struct net_device *netdev)
3203 unsigned long flags;
3205 SET_NETDEV_DEV(netdev, &pdev->dev);
3206 pci_set_drvdata(pdev, netdev);
3208 bnad->netdev = netdev;
3209 bnad->pcidev = pdev;
3210 bnad->mmio_start = pci_resource_start(pdev, 0);
3211 bnad->mmio_len = pci_resource_len(pdev, 0);
3212 bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
3214 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3215 pci_set_drvdata(pdev, NULL);
3218 pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
3219 (unsigned long long) bnad->mmio_len);
3221 spin_lock_irqsave(&bnad->bna_lock, flags);
3222 if (!bnad_msix_disable)
3223 bnad->cfg_flags = BNAD_CF_MSIX;
3225 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3227 bnad_q_num_init(bnad);
3228 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3230 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3231 (bnad->num_rx * bnad->num_rxp_per_rx) +
3232 BNAD_MAILBOX_MSIX_VECTORS;
3234 bnad->txq_depth = BNAD_TXQ_DEPTH;
3235 bnad->rxq_depth = BNAD_RXQ_DEPTH;
3237 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3238 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3240 sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
3241 bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
3250 * Must be called after bnad_pci_uninit()
3251 * so that iounmap() and pci_set_drvdata(NULL)
3252 * happen only after PCI uninitialization.
3255 bnad_uninit(struct bnad *bnad)
3258 flush_workqueue(bnad->work_q);
3259 destroy_workqueue(bnad->work_q);
3260 bnad->work_q = NULL;
3264 iounmap(bnad->bar0);
3265 pci_set_drvdata(bnad->pcidev, NULL);
3270 a) Per ioceth mutex used for serializing configuration
3271 changes from OS interface
3272 b) spin lock used to protect bna state machine
3275 bnad_lock_init(struct bnad *bnad)
3277 spin_lock_init(&bnad->bna_lock);
3278 mutex_init(&bnad->conf_mutex);
3279 mutex_init(&bnad_list_mutex);
3283 bnad_lock_uninit(struct bnad *bnad)
3285 mutex_destroy(&bnad->conf_mutex);
3286 mutex_destroy(&bnad_list_mutex);
3289 /* PCI Initialization */
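/*
 * Enable the PCI device, claim its regions and negotiate the DMA mask:
 * 64-bit if available (reported through *using_dac), otherwise fall back
 * to a 32-bit mask before enabling bus mastering.
 */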
3291 bnad_pci_init(struct bnad *bnad,
3292 struct pci_dev *pdev, bool *using_dac)
3296 err = pci_enable_device(pdev);
3299 err = pci_request_regions(pdev, BNAD_NAME);
3301 goto disable_device;
3302 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3303 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
3306 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3308 err = dma_set_coherent_mask(&pdev->dev,
3311 goto release_regions;
3315 pci_set_master(pdev);
3319 pci_release_regions(pdev);
3321 pci_disable_device(pdev);
3327 bnad_pci_uninit(struct pci_dev *pdev)
3329 pci_release_regions(pdev);
3330 pci_disable_device(pdev);
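/*
 * PCI probe: loads the firmware image, allocates the netdev/bnad pair,
 * initializes PCI and BNA resources, starts the IOC timers, brings up
 * ioceth, sizes the Tx/Rx queues against the ASIC attributes and finally
 * registers the net_device.
 */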
3334 bnad_pci_probe(struct pci_dev *pdev,
3335 const struct pci_device_id *pcidev_id)
3341 struct net_device *netdev;
3342 struct bfa_pcidev pcidev_info;
3343 unsigned long flags;
3345 pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
3346 pdev, pcidev_id, PCI_FUNC(pdev->devfn));
3348 mutex_lock(&bnad_fwimg_mutex);
3349 if (!cna_get_firmware_buf(pdev)) {
3350 mutex_unlock(&bnad_fwimg_mutex);
3351 pr_warn("Failed to load Firmware Image!\n");
3354 mutex_unlock(&bnad_fwimg_mutex);
3357 * Allocates sizeof(struct net_device) + sizeof(struct bnad)
3358 * bnad = netdev->priv
3360 netdev = alloc_etherdev(sizeof(struct bnad));
3365 bnad = netdev_priv(netdev);
3366 bnad_lock_init(bnad);
3367 bnad_add_to_list(bnad);
3369 mutex_lock(&bnad->conf_mutex);
3371 * PCI initialization
3372 * Output : using_dac = 1 for 64 bit DMA
3373 * = 0 for 32 bit DMA
3376 err = bnad_pci_init(bnad, pdev, &using_dac);
3381 * Initialize bnad structure
3382 * Setup relation between pci_dev & netdev
3384 err = bnad_init(bnad, pdev, netdev);
3388 /* Initialize netdev structure, set up ethtool ops */
3389 bnad_netdev_init(bnad, using_dac);
3391 /* Set link to down state */
3392 netif_carrier_off(netdev);
3394 /* Set up the debugfs node for this bnad */
3395 if (bna_debugfs_enable)
3396 bnad_debugfs_init(bnad);
3398 /* Get resource requirement from bna */
3399 spin_lock_irqsave(&bnad->bna_lock, flags);
3400 bna_res_req(&bnad->res_info[0]);
3401 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3403 /* Allocate resources from bna */
3404 err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3410 /* Setup pcidev_info for bna_init() */
3411 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3412 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3413 pcidev_info.device_id = bnad->pcidev->device;
3414 pcidev_info.pci_bar_kva = bnad->bar0;
3416 spin_lock_irqsave(&bnad->bna_lock, flags);
3417 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3418 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3420 bnad->stats.bna_stats = &bna->stats;
3422 bnad_enable_msix(bnad);
3423 err = bnad_mbox_irq_alloc(bnad);
3428 setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
3429 ((unsigned long)bnad));
3430 setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
3431 ((unsigned long)bnad));
3432 setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
3433 ((unsigned long)bnad));
3434 setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3435 ((unsigned long)bnad));
3437 /* Now start the timer before calling IOC */
3438 mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
3439 jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3443 * If the callback comes with an error, we bail out.
3444 * This is a catastrophic error.
3446 err = bnad_ioceth_enable(bnad);
3448 pr_err("BNA: Initialization failed err=%d\n",
3453 spin_lock_irqsave(&bnad->bna_lock, flags);
3454 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3455 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3456 bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3457 bna_attr(bna)->num_rxp - 1);
3458 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3459 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3462 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3464 goto disable_ioceth;
3466 spin_lock_irqsave(&bnad->bna_lock, flags);
3467 bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3468 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3470 err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3473 goto disable_ioceth;
3476 spin_lock_irqsave(&bnad->bna_lock, flags);
3477 bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3478 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3480 /* Get the burnt-in MAC address */
3481 spin_lock_irqsave(&bnad->bna_lock, flags);
3482 bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
3483 bnad_set_netdev_perm_addr(bnad);
3484 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3486 mutex_unlock(&bnad->conf_mutex);
3488 /* Finally, register with the net_device layer */
3489 err = register_netdev(netdev);
3491 pr_err("BNA : Registering with netdev failed\n");
3494 set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
3499 mutex_unlock(&bnad->conf_mutex);
3503 mutex_lock(&bnad->conf_mutex);
3504 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3506 bnad_ioceth_disable(bnad);
3507 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3508 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3509 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3510 spin_lock_irqsave(&bnad->bna_lock, flags);
3512 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3513 bnad_mbox_irq_free(bnad);
3514 bnad_disable_msix(bnad);
3516 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3518 /* Remove the debugfs node for this bnad */
3519 kfree(bnad->regdata);
3520 bnad_debugfs_uninit(bnad);
3523 bnad_pci_uninit(pdev);
3525 mutex_unlock(&bnad->conf_mutex);
3526 bnad_remove_from_list(bnad);
3527 bnad_lock_uninit(bnad);
3528 free_netdev(netdev);
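/*
 * PCI remove: unregisters the netdev if needed, then unwinds the probe
 * sequence: disables ioceth, stops the IOC timers, frees BNA resources,
 * releases the mailbox IRQ and MSI-X vectors, and undoes the PCI setup.
 */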
3533 bnad_pci_remove(struct pci_dev *pdev)
3535 struct net_device *netdev = pci_get_drvdata(pdev);
3538 unsigned long flags;
3543 pr_info("%s bnad_pci_remove\n", netdev->name);
3544 bnad = netdev_priv(netdev);
3547 if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3548 unregister_netdev(netdev);
3550 mutex_lock(&bnad->conf_mutex);
3551 bnad_ioceth_disable(bnad);
3552 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3553 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3554 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3555 spin_lock_irqsave(&bnad->bna_lock, flags);
3557 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3559 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3560 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3561 bnad_mbox_irq_free(bnad);
3562 bnad_disable_msix(bnad);
3563 bnad_pci_uninit(pdev);
3564 mutex_unlock(&bnad->conf_mutex);
3565 bnad_remove_from_list(bnad);
3566 bnad_lock_uninit(bnad);
3567 /* Remove the debugfs node for this bnad */
3568 kfree(bnad->regdata);
3569 bnad_debugfs_uninit(bnad);
3571 free_netdev(netdev);
3574 static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
3576 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3577 PCI_DEVICE_ID_BROCADE_CT),
3578 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3579 .class_mask = 0xffff00
3582 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3583 BFA_PCI_DEVICE_ID_CT2),
3584 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3585 .class_mask = 0xffff00
3590 MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3592 static struct pci_driver bnad_pci_driver = {
3594 .id_table = bnad_pci_id_table,
3595 .probe = bnad_pci_probe,
3596 .remove = bnad_pci_remove,
3600 bnad_module_init(void)
3604 pr_info("Brocade 10G Ethernet driver - version: %s\n",
3607 bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3609 err = pci_register_driver(&bnad_pci_driver);
3611 pr_err("bna : PCI registration failed in module init "
3620 bnad_module_exit(void)
3622 pci_unregister_driver(&bnad_pci_driver);
3623 release_firmware(bfi_fw);
3626 module_init(bnad_module_init);
3627 module_exit(bnad_module_exit);
3629 MODULE_AUTHOR("Brocade");
3630 MODULE_LICENSE("GPL");
3631 MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
3632 MODULE_VERSION(BNAD_VERSION);
3633 MODULE_FIRMWARE(CNA_FW_FILE_CT);
3634 MODULE_FIRMWARE(CNA_FW_FILE_CT2);