/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/prefetch.h>
static DEFINE_MUTEX(bnad_fwimg_mutex);

static uint bnad_msix_disable;
module_param(bnad_msix_disable, uint, 0444);
MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");

static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");

u32 bnad_rxqs_per_cq = 2;

static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
#define BNAD_TX_UNMAPQ_DEPTH	(bnad->txq_depth * 2)

#define BNAD_RX_UNMAPQ_DEPTH	(bnad->rxq_depth)

#define BNAD_GET_MBOX_IRQ(_bnad)				\
	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
	 ((_bnad)->msix_table[(_bnad)->msix_num - 1].vector) :	\
	 ((_bnad)->pcidev->irq))
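/*
 * In MSIX mode the mailbox rides the last entry of msix_table (vector
 * index msix_num - 1); otherwise it shares the device's INTx line. The
 * MSIX setup paths below rely on this same layout.
 */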
#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth)	\
do {								\
	(_res_info)->res_type = BNA_RES_T_MEM;			\
	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
	(_res_info)->res_u.mem_info.num = (_num);		\
	(_res_info)->res_u.mem_info.len =			\
		sizeof(struct bnad_unmap_q) +			\
		(sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \
} while (0)
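/*
 * The unmap queue length above uses the pre-C99 "one-element trailing
 * array" sizing idiom: struct bnad_unmap_q already embeds one
 * bnad_skb_unmap slot, so only (_depth - 1) extra slots are added.
 */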
#define BNAD_TXRX_SYNC_MDELAY	250	/* 250 msecs */

/* Reinitialize completions in CQ, once Rx is taken down */
bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
	struct bna_cq_entry *cmpl, *next_cmpl;
	unsigned int wi_range, wis = 0, ccb_prod = 0;

	BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,

	for (i = 0; i < ccb->q_depth; i++) {
		if (likely(--wi_range))
			BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
			BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
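/*
 * sw_qpt appears to be a software queue page table: completion entries
 * live in page-sized chunks, so BNA_CQ_QPGE_PTR_GET re-derives the entry
 * pointer whenever the index crosses a page boundary, with wi_range
 * counting the entries left in the current page.
 */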
/*
 * Frees all pending Tx Bufs.
 * At this point no activity is expected on the Q,
 * so DMA unmap & freeing is fine.
 */
bnad_free_all_txbufs(struct bnad *bnad,
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb = NULL;

	unmap_array = unmap_q->unmap_array;

	while (unmap_cons < unmap_q->q_depth) {
		skb = unmap_array[unmap_cons].skb;
		unmap_array[unmap_cons].skb = NULL;

		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr), skb_headlen(skb),
		dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
		if (++unmap_cons >= unmap_q->q_depth)

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			dma_unmap_page(&bnad->pcidev->dev,
				       dma_unmap_addr(&unmap_array[unmap_cons],
				       skb_shinfo(skb)->frags[i].size,
			dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
			if (++unmap_cons >= unmap_q->q_depth)

		dev_kfree_skb_any(skb);

/* Data Path Handlers */

/*
 * bnad_free_txbufs : Frees the Tx bufs on Tx completion
 * Can be called in a) Interrupt context
 */
bnad_free_txbufs(struct bnad *bnad,
	u32 sent_packets = 0, sent_bytes = 0;
	u16 wis, unmap_cons, updated_hw_cons;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	/*
	 * Just return if TX is stopped. This check is useful
	 * when bnad_free_txbufs() is invoked from a tasklet that was
	 * scheduled before bnad_cb_tx_cleanup() cleared the
	 * BNAD_TXQ_TX_STARTED bit, but actually runs after the cleanup
	 * has been done.
	 */
	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))

	updated_hw_cons = *(tcb->hw_consumer_index);

	wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
				 updated_hw_cons, tcb->q_depth);

	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
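	/*
	 * wis counts the work items the hardware has consumed since the
	 * last pass; each completed skb accounts for
	 * BNA_TXQ_WI_NEEDED(1 + nr_frags) of them, which is exactly what
	 * the reclaim loop below subtracts.
	 */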
	unmap_array = unmap_q->unmap_array;
	unmap_cons = unmap_q->consumer_index;

	prefetch(&unmap_array[unmap_cons + 1]);
		skb = unmap_array[unmap_cons].skb;
		unmap_array[unmap_cons].skb = NULL;

		sent_bytes += skb->len;
		wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);

		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr), skb_headlen(skb),
		dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
		BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);

		prefetch(&unmap_array[unmap_cons + 1]);
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			prefetch(&unmap_array[unmap_cons + 1]);

			dma_unmap_page(&bnad->pcidev->dev,
				       dma_unmap_addr(&unmap_array[unmap_cons],
				       skb_shinfo(skb)->frags[i].size,
			dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
			BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);

		dev_kfree_skb_any(skb);

	/* Update consumer pointers. */
	tcb->consumer_index = updated_hw_cons;
	unmap_q->consumer_index = unmap_cons;

	tcb->txq->tx_packets += sent_packets;
	tcb->txq->tx_bytes += sent_bytes;

/* Tx Free Tasklet function */
/* Frees for all the tcb's in all the Tx's */
/*
 * Scheduled from sending context, so that
 * the fat Tx lock is not held for too long
 * in the sending context.
 */
bnad_tx_free_tasklet(unsigned long bnad_ptr)
	struct bnad *bnad = (struct bnad *)bnad_ptr;

	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			tcb = bnad->tx_info[i].tcb[j];

			if (((u16) (*tcb->hw_consumer_index) !=
			     tcb->consumer_index) &&
			    (!test_and_set_bit(BNAD_TXQ_FREE_SENT,
				acked = bnad_free_txbufs(bnad, tcb);
				if (likely(test_bit(BNAD_TXQ_TX_STARTED,
					bna_ib_ack(tcb->i_dbell, acked);
				smp_mb__before_clear_bit();
				clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
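				/*
				 * BNAD_TXQ_FREE_SENT acts as a "reclaim in
				 * progress" lock shared with bnad_tx() and
				 * bnad_start_xmit();
				 * smp_mb__before_clear_bit() orders the
				 * frees above before the bit release.
				 */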
			if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,

			if (netif_queue_stopped(bnad->netdev)) {
				if (acked && netif_carrier_ok(bnad->netdev) &&
				    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
				    BNAD_NETIF_WAKE_THRESHOLD) {
					netif_wake_queue(bnad->netdev);
					/* Counters for individual TxQs? */
					BNAD_UPDATE_CTR(bnad,

bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
	struct net_device *netdev = bnad->netdev;

	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))

	sent = bnad_free_txbufs(bnad, tcb);

	if (netif_queue_stopped(netdev) &&
	    netif_carrier_ok(netdev) &&
	    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
	    BNAD_NETIF_WAKE_THRESHOLD) {
		if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
			netif_wake_queue(netdev);
			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);

	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
		bna_ib_ack(tcb->i_dbell, sent);

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

/* MSIX Tx Completion Handler */
bnad_msix_tx(int irq, void *data)
	struct bna_tcb *tcb = (struct bna_tcb *)data;
	struct bnad *bnad = tcb->bnad;

bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	rcb->producer_index = 0;
	rcb->consumer_index = 0;

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;

bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
	struct bnad_unmap_q *unmap_q;
	struct bnad_skb_unmap *unmap_array;

	unmap_q = rcb->unmap_q;
	unmap_array = unmap_q->unmap_array;
	for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
		skb = unmap_array[unmap_cons].skb;
		unmap_array[unmap_cons].skb = NULL;
		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
				 rcb->rxq->buffer_size,
	bnad_reset_rcb(bnad, rcb);

bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
	u16 to_alloc, alloced, unmap_prod, wi_range;
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct bna_rxq_entry *rxent;

		BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);

	unmap_array = unmap_q->unmap_array;
	unmap_prod = unmap_q->producer_index;

	BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);

		BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
		skb = alloc_skb(rcb->rxq->buffer_size + NET_IP_ALIGN,
		if (unlikely(!skb)) {
			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
		skb->dev = bnad->netdev;
		skb_reserve(skb, NET_IP_ALIGN);
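		/*
		 * skb_reserve(NET_IP_ALIGN) skews the buffer so the IP
		 * header lands on a 4-byte boundary after the 14-byte
		 * Ethernet header is DMAed in.
		 */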
		unmap_array[unmap_prod].skb = skb;
		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
					  rcb->rxq->buffer_size,
		dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);

	if (likely(alloced)) {
		unmap_q->producer_index = unmap_prod;
		rcb->producer_index = unmap_prod;

		if (likely(test_bit(BNAD_RXQ_STARTED, &rcb->flags)))
			bna_rxq_prod_indx_doorbell(rcb);

bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
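	/*
	 * The shift below is a cheap threshold test: refill only when at
	 * least 2^BNAD_RXQ_REFILL_THRESHOLD_SHIFT unmap entries are free.
	 * The BNAD_RXQ_REFILL bit keeps this path and bnad_cb_rx_post()
	 * from refilling the same RxQ concurrently.
	 */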
	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
		if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
		    >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
			bnad_alloc_n_post_rxbufs(bnad, rcb);
		smp_mb__before_clear_bit();
		clear_bit(BNAD_RXQ_REFILL, &rcb->flags);

bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
	struct bna_cq_entry *cmpl, *next_cmpl;
	struct bna_rcb *rcb = NULL;
	unsigned int wi_range, packets = 0, wis = 0;
	struct bnad_unmap_q *unmap_q;
	struct bnad_skb_unmap *unmap_array;
	u32 flags, unmap_cons;
	u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;

	if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))

	prefetch(bnad->netdev);
	BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
	BUG_ON(!(wi_range <= ccb->q_depth));
	while (cmpl->valid && packets < budget) {
		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
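		/*
		 * Each CQ fans in completions from two RxQs (see
		 * bnad_rxqs_per_cq above): the rxq_id carried in the
		 * completion selects rcb[0] (qid0) or rcb[1].
		 */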
		if (qid0 == cmpl->rxq_id)

		unmap_q = rcb->unmap_q;
		unmap_array = unmap_q->unmap_array;
		unmap_cons = unmap_q->consumer_index;

		skb = unmap_array[unmap_cons].skb;
		unmap_array[unmap_cons].skb = NULL;
		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
				 rcb->rxq->buffer_size,
		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);

		/* Should be more efficient ? Performance ? */
		BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);

		if (likely(--wi_range))
			next_cmpl = cmpl + 1;

		BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
		BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
				    next_cmpl, wi_range);
		BUG_ON(!(wi_range <= ccb->q_depth));

		flags = ntohl(cmpl->flags);

		if (unlikely
		    (flags &
		     (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
		      BNA_CQ_EF_TOO_LONG))) {
			dev_kfree_skb_any(skb);
			rcb->rxq->rx_packets_with_error++;

		skb_put(skb, ntohs(cmpl->length));
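		/*
		 * Trust the hardware checksum only when the L4 checksum is
		 * good and either the IPv4 header checksum is good or the
		 * packet is IPv6 (which carries no L3 checksum); otherwise
		 * leave verification to the stack.
		 */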
		if (likely
		    ((bnad->netdev->features & NETIF_F_RXCSUM) &&
		     (((flags & BNA_CQ_EF_IPV4) &&
		       (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
		      (flags & BNA_CQ_EF_IPV6)) &&
		     (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
		     (flags & BNA_CQ_EF_L4_CKSUM_OK)))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		rcb->rxq->rx_packets++;
		rcb->rxq->rx_bytes += skb->len;
		skb->protocol = eth_type_trans(skb, bnad->netdev);
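		/*
		 * Only frames already validated by hardware
		 * (CHECKSUM_UNNECESSARY) go through GRO below; the rest
		 * take the plain receive path, using the VLAN-accelerated
		 * variants when the completion carries a stripped tag.
		 */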
		if (bnad->vlan_grp && (flags & BNA_CQ_EF_VLAN)) {
			struct bnad_rx_ctrl *rx_ctrl =
				(struct bnad_rx_ctrl *)ccb->ctrl;
			if (skb->ip_summed == CHECKSUM_UNNECESSARY)
				vlan_gro_receive(&rx_ctrl->napi, bnad->vlan_grp,
						 ntohs(cmpl->vlan_tag), skb);
			else
				vlan_hwaccel_receive_skb(skb,
							 ntohs(cmpl->vlan_tag));
		} else { /* Not VLAN tagged/stripped */
			struct bnad_rx_ctrl *rx_ctrl =
				(struct bnad_rx_ctrl *)ccb->ctrl;
			if (skb->ip_summed == CHECKSUM_UNNECESSARY)
				napi_gro_receive(&rx_ctrl->napi, skb);
			else
				netif_receive_skb(skb);

	BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);

	if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
		bna_ib_ack(ccb->i_dbell, packets);
	bnad_refill_rxq(bnad, ccb->rcb[0]);
	if (ccb->rcb[1])
		bnad_refill_rxq(bnad, ccb->rcb[1]);

	if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
		bna_ib_ack(ccb->i_dbell, 0);

bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
	if (unlikely(!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))

	bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
	bna_ib_ack(ccb->i_dbell, 0);

bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)

	/* Because of polling context */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_enable_rx_irq_unsafe(ccb);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
	struct napi_struct *napi = &rx_ctrl->napi;

	if (likely(napi_schedule_prep(napi))) {
		bnad_disable_rx_irq(bnad, ccb);
		__napi_schedule(napi);
	BNAD_UPDATE_CTR(bnad, netif_rx_schedule);

/* MSIX Rx Path Handler */
bnad_msix_rx(int irq, void *data)
	struct bna_ccb *ccb = (struct bna_ccb *)data;
	struct bnad *bnad = ccb->bnad;

	bnad_netif_rx_schedule_poll(bnad, ccb);

/* Interrupt handlers */

/* Mbox Interrupt Handlers */
bnad_msix_mbox_handler(int irq, void *data)
	struct bnad *bnad = (struct bnad *)data;

	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))

	spin_lock_irqsave(&bnad->bna_lock, flags);

	bna_intr_status_get(&bnad->bna, intr_status);

	if (BNA_IS_MBOX_ERR_INTR(intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

bnad_isr(int irq, void *data)
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;

	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))

	bna_intr_status_get(&bnad->bna, intr_status);

	if (unlikely(!intr_status))

	spin_lock_irqsave(&bnad->bna_lock, flags);

	if (BNA_IS_MBOX_ERR_INTR(intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (!BNA_IS_INTX_DATA_INTR(intr_status))

	/* Process data interrupts */
	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++)
			bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			bnad_netif_rx_schedule_poll(bnad,

/*
 * Called in interrupt / callback context
 * with bna_lock held, so cfg_flags access is OK
 */
bnad_enable_mbox_irq(struct bnad *bnad)
	clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
/*
 * Called with bnad->bna_lock held because of
 * bnad->cfg_flags access.
 */
bnad_disable_mbox_irq(struct bnad *bnad)
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
bnad_set_netdev_perm_addr(struct bnad *bnad)
	struct net_device *netdev = bnad->netdev;

	memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
	if (is_zero_ether_addr(netdev->dev_addr))
		memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);

/* Control Path Handlers */

bnad_cb_device_enable_mbox_intr(struct bnad *bnad)
	bnad_enable_mbox_irq(bnad);

bnad_cb_device_disable_mbox_intr(struct bnad *bnad)
	bnad_disable_mbox_irq(bnad);

bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status)
	complete(&bnad->bnad_completions.ioc_comp);
	bnad->bnad_completions.ioc_comp_status = status;

bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status)
	complete(&bnad->bnad_completions.ioc_comp);
	bnad->bnad_completions.ioc_comp_status = status;

bnad_cb_port_disabled(void *arg, enum bna_cb_status status)
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.port_comp);

	netif_carrier_off(bnad->netdev);

bnad_cb_port_link_status(struct bnad *bnad,
			 enum bna_link_status link_status)

	link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);

	if (link_status == BNA_CEE_UP) {
		set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
		BNAD_UPDATE_CTR(bnad, cee_up);
	} else
		clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);

	if (!netif_carrier_ok(bnad->netdev)) {
		struct bna_tcb *tcb = bnad->tx_info[0].tcb[0];

		pr_warn("bna: %s link up\n",
		netif_carrier_on(bnad->netdev);
		BNAD_UPDATE_CTR(bnad, link_toggle);
		if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
			/* Force an immediate Transmit Schedule */
			pr_info("bna: %s TX_STARTED\n",
			netif_wake_queue(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
		} else {
			netif_stop_queue(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, netif_queue_stop);

	if (netif_carrier_ok(bnad->netdev)) {
		pr_warn("bna: %s link down\n",
		netif_carrier_off(bnad->netdev);
		BNAD_UPDATE_CTR(bnad, link_toggle);

bnad_cb_tx_disabled(void *arg, struct bna_tx *tx,
		    enum bna_cb_status status)
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.tx_comp);

bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
	struct bnad_tx_info *tx_info =
		(struct bnad_tx_info *)tcb->txq->tx->priv;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;

	tx_info->tcb[tcb->id] = tcb;
	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
	unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;

bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
	struct bnad_tx_info *tx_info =
		(struct bnad_tx_info *)tcb->txq->tx->priv;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;

	while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))

	bnad_free_all_txbufs(bnad, tcb);

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	tx_info->tcb[tcb->id] = NULL;

bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
	unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;

bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
	bnad_free_all_rxbufs(bnad, rcb);

bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
	struct bnad_rx_info *rx_info =
		(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = ccb;
	ccb->ctrl = &rx_info->rx_ctrl[ccb->id];

bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
	struct bnad_rx_info *rx_info =
		(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = NULL;

bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
	struct bnad_tx_info *tx_info =
		(struct bnad_tx_info *)tcb->txq->tx->priv;

	if (tx_info != &bnad->tx_info[0])

	clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
	netif_stop_queue(bnad->netdev);
	pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name);

bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;

	if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))

	clear_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags);

	while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))

	bnad_free_all_txbufs(bnad, tcb);

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
	/*
	 * Workaround: if the first device enable fails we can be left
	 * with a zero MAC address; try to fetch the MAC address again.
	 */
	if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
		bna_port_mac_get(&bnad->bna.port, &bnad->perm_addr);
		bnad_set_netdev_perm_addr(bnad);
	set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);

	if (netif_carrier_ok(bnad->netdev)) {
		pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
		netif_wake_queue(bnad->netdev);
		BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);

bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
	/* Delay only once for the whole Tx Path Shutdown */
	if (!test_and_set_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags))
		mdelay(BNAD_TXRX_SYNC_MDELAY);

bnad_cb_rx_cleanup(struct bnad *bnad,
	clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);

	if (ccb->rcb[1])
		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);

	if (!test_and_set_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags))
		mdelay(BNAD_TXRX_SYNC_MDELAY);

bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb)
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	clear_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags);

	if (rcb == rcb->cq->ccb->rcb[0])
		bnad_cq_cmpl_init(bnad, rcb->cq->ccb);

	bnad_free_all_rxbufs(bnad, rcb);

	set_bit(BNAD_RXQ_STARTED, &rcb->flags);

	/* Now allocate & post buffers for this RCB */
	/* !!Allocation in callback context */
	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
		if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
		    >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
			bnad_alloc_n_post_rxbufs(bnad, rcb);
		smp_mb__before_clear_bit();
		clear_bit(BNAD_RXQ_REFILL, &rcb->flags);

bnad_cb_rx_disabled(void *arg, struct bna_rx *rx,
		    enum bna_cb_status status)
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.rx_comp);

bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx,
		     enum bna_cb_status status)
	bnad->bnad_completions.mcast_comp_status = status;
	complete(&bnad->bnad_completions.mcast_comp);

bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
		  struct bna_stats *stats)
	if (status == BNA_CB_SUCCESS)
		BNAD_UPDATE_CTR(bnad, hw_stats_updates);

	if (!netif_running(bnad->netdev) ||
	    !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))

	mod_timer(&bnad->stats_timer,
		  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));

/* Resource allocation, free functions */

bnad_mem_free(struct bnad *bnad,
	      struct bna_mem_info *mem_info)

	if (mem_info->mdl == NULL)

	for (i = 0; i < mem_info->num; i++) {
		if (mem_info->mdl[i].kva != NULL) {
			if (mem_info->mem_type == BNA_MEM_T_DMA) {
				BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
				dma_free_coherent(&bnad->pcidev->dev,
						  mem_info->mdl[i].len,
						  mem_info->mdl[i].kva, dma_pa);
			} else
				kfree(mem_info->mdl[i].kva);

	kfree(mem_info->mdl);
	mem_info->mdl = NULL;

bnad_mem_alloc(struct bnad *bnad,
	       struct bna_mem_info *mem_info)

	if ((mem_info->num == 0) || (mem_info->len == 0)) {
		mem_info->mdl = NULL;

	mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
	if (mem_info->mdl == NULL)

	if (mem_info->mem_type == BNA_MEM_T_DMA) {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva =
				dma_alloc_coherent(&bnad->pcidev->dev,
						   mem_info->len, &dma_pa,
			if (mem_info->mdl[i].kva == NULL)

			BNA_SET_DMA_ADDR(dma_pa,
					 &(mem_info->mdl[i].dma));
	} else {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva = kzalloc(mem_info->len,
			if (mem_info->mdl[i].kva == NULL)

	bnad_mem_free(bnad, mem_info);

/* Free IRQ for Mailbox */
bnad_mbox_irq_free(struct bnad *bnad,
		   struct bna_intr_info *intr_info)
	unsigned long flags;

	if (intr_info->idl == NULL)

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_disable_mbox_irq(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	irq = BNAD_GET_MBOX_IRQ(bnad);
	free_irq(irq, bnad);

	kfree(intr_info->idl);
/*
 * Allocates the IRQ for the Mailbox, but keeps it disabled.
 * It will be enabled once we get the mbox enable callback.
 */
bnad_mbox_irq_alloc(struct bnad *bnad,
		    struct bna_intr_info *intr_info)
	unsigned long irq_flags = 0, flags;
	irq_handler_t irq_handler;

	/* Mbox should use only 1 vector */
	intr_info->idl = kzalloc(sizeof(*(intr_info->idl)), GFP_KERNEL);
	if (!intr_info->idl)

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bnad->cfg_flags & BNAD_CF_MSIX) {
		irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
		irq = bnad->msix_table[bnad->msix_num - 1].vector;
		intr_info->intr_type = BNA_INTR_T_MSIX;
		intr_info->idl[0].vector = bnad->msix_num - 1;
	} else {
		irq_handler = (irq_handler_t)bnad_isr;
		irq = bnad->pcidev->irq;
		irq_flags = IRQF_SHARED;
		intr_info->intr_type = BNA_INTR_T_INTX;
		/* intr_info->idl.vector = 0 ? */
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);

	/*
	 * Set the Mbox IRQ disable flag, so that the IRQ handler
	 * called from request_irq() for SHARED IRQs does not execute.
	 */
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);

	err = request_irq(irq, irq_handler, irq_flags,
			  bnad->mbox_irq_name, bnad);
	if (err) {
		kfree(intr_info->idl);
		intr_info->idl = NULL;
bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
	kfree(intr_info->idl);
	intr_info->idl = NULL;

/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
		    uint txrx_id, struct bna_intr_info *intr_info)
	int i, vector_start = 0;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	cfg_flags = bnad->cfg_flags;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (cfg_flags & BNAD_CF_MSIX) {
		intr_info->intr_type = BNA_INTR_T_MSIX;
		intr_info->idl = kcalloc(intr_info->num,
					 sizeof(struct bna_intr_descr),
		if (!intr_info->idl)

			vector_start = txrx_id;

			vector_start = bnad->num_tx * bnad->num_txq_per_tx +

		for (i = 0; i < intr_info->num; i++)
			intr_info->idl[i].vector = vector_start + i;
	} else {
		intr_info->intr_type = BNA_INTR_T_INTX;

		intr_info->idl = kcalloc(intr_info->num,
					 sizeof(struct bna_intr_descr),
		if (!intr_info->idl)

			intr_info->idl[0].vector = 0x1;	/* Bit mask : Tx IB */

			intr_info->idl[0].vector = 0x2;	/* Bit mask : Rx IB */
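	/*
	 * In MSIX mode idl[] holds real vector indices (Tx vectors first,
	 * then Rx, mailbox last); in INTx mode the "vector" is instead a
	 * bit mask identifying the Tx or Rx IB within the single shared
	 * interrupt.
	 */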
/*
 * NOTE: Should be called for MSIX only
 * Unregisters Tx MSIX vector(s) from the kernel
 */
bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,

	for (i = 0; i < num_txqs; i++) {
		if (tx_info->tcb[i] == NULL)
			continue;

		vector_num = tx_info->tcb[i]->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);

/*
 * NOTE: Should be called for MSIX only
 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
 */
bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
		      uint tx_id, int num_txqs)

	for (i = 0; i < num_txqs; i++) {
		vector_num = tx_info->tcb[i]->intr_vector;
		sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
			tx_id + tx_info->tcb[i]->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_tx, 0,
				  tx_info->tcb[i]->name,

	bnad_tx_msix_unregister(bnad, tx_info, (i - 1));

/*
 * NOTE: Should be called for MSIX only
 * Unregisters Rx MSIX vector(s) from the kernel
 */
bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,

	for (i = 0; i < num_rxps; i++) {
		if (rx_info->rx_ctrl[i].ccb == NULL)
			continue;

		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector,
			 rx_info->rx_ctrl[i].ccb);
/*
 * NOTE: Should be called for MSIX only
 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
 */
bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
		      uint rx_id, int num_rxps)

	for (i = 0; i < num_rxps; i++) {
		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
			rx_id + rx_info->rx_ctrl[i].ccb->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_rx, 0,
				  rx_info->rx_ctrl[i].ccb->name,
				  rx_info->rx_ctrl[i].ccb);

	bnad_rx_msix_unregister(bnad, rx_info, (i - 1));

/* Free Tx object Resources */
bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);

/* Allocates memory and interrupt resources for Tx object */
bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					     &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
						  &res_info[i].res_u.intr_info);

	bnad_tx_res_free(bnad, res_info);

/* Free Rx object Resources */
bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)

	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);

/* Allocates memory and interrupt resources for Rx object */
bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,

	/* All memory needs to be allocated before setup_ccbs */
	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					     &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
						  &res_info[i].res_u.intr_info);

	bnad_rx_res_free(bnad, res_info);

/* Timer callbacks */
bnad_ioc_timeout(unsigned long data)
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_timeout((void *) &bnad->bna.device.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

bnad_ioc_hb_check(unsigned long data)
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_hb_check((void *) &bnad->bna.device.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

bnad_iocpf_timeout(unsigned long data)
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_timeout((void *) &bnad->bna.device.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

bnad_iocpf_sem_timeout(unsigned long data)
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.device.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
/*
 * All timer routines use bnad->bna_lock to protect against
 * the following race, which may occur in case of no locking:
 */

/* b) Dynamic Interrupt Moderation Timer */
bnad_dim_timeout(unsigned long data)
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	unsigned long flags;

	if (!netif_carrier_ok(bnad->netdev))

	spin_lock_irqsave(&bnad->bna_lock, flags);
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			bna_rx_dim_update(rx_ctrl->ccb);
	/* Check for BNAD_CF_DIM_ENABLED; this does not eliminate the race */
	if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
/* c) Statistics Timer */
bnad_stats_timeout(unsigned long data)
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	if (!netif_running(bnad->netdev) ||
	    !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_stats_get(&bnad->bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

/*
 * Set up timer for DIM
 * Called with bnad->bna_lock held
 */
bnad_dim_timer_start(struct bnad *bnad)
	if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
	    !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
		setup_timer(&bnad->dim_timer, bnad_dim_timeout,
			    (unsigned long)bnad);
		set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));

/*
 * Set up timer for statistics
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
bnad_stats_timer_start(struct bnad *bnad)
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
		setup_timer(&bnad->stats_timer, bnad_stats_timeout,
			    (unsigned long)bnad);
		mod_timer(&bnad->stats_timer,
			  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

/*
 * Stops the stats timer
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
bnad_stats_timer_stop(struct bnad *bnad)
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	del_timer_sync(&bnad->stats_timer);
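/*
 * Note the ordering above: the RUNNING flag is cleared under bna_lock,
 * but del_timer_sync() is called only after the lock is dropped; the
 * timer callback itself takes bna_lock, so waiting for it to finish
 * with the lock held would deadlock.
 */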
bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
	int i = 1;	/* Index 0 has broadcast address */
	struct netdev_hw_addr *mc_addr;

	netdev_for_each_mc_addr(mc_addr, netdev) {
		memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],

bnad_napi_poll_rx(struct napi_struct *napi, int budget)
	struct bnad_rx_ctrl *rx_ctrl =
		container_of(napi, struct bnad_rx_ctrl, napi);
	struct bna_ccb *ccb;

	if (!netif_carrier_ok(bnad->netdev))

	rcvd = bnad_poll_cq(bnad, ccb, budget);

	napi_complete(napi);

	BNAD_UPDATE_CTR(bnad, netif_rx_complete);

	bnad_enable_rx_irq(bnad, ccb);

bnad_napi_enable(struct bnad *bnad, u32 rx_id)
	struct bnad_rx_ctrl *rx_ctrl;

	/* Initialize & enable NAPI */
	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];

		netif_napi_add(bnad->netdev, &rx_ctrl->napi,
			       bnad_napi_poll_rx, 64);

		napi_enable(&rx_ctrl->napi);

bnad_napi_disable(struct bnad *bnad, u32 rx_id)

	/* First disable and then clean up */
	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
		napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
		netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
/* Should be called with conf_lock held */
bnad_cleanup_tx(struct bnad *bnad, uint tx_id)
	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
	unsigned long flags;

	init_completion(&bnad->bnad_completions.tx_comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&bnad->bnad_completions.tx_comp);

	if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
		bnad_tx_msix_unregister(bnad, tx_info,
					bnad->num_txq_per_tx);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_destroy(tx_info->tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	tasklet_kill(&bnad->tx_free_tasklet);

	bnad_tx_res_free(bnad, res_info);
/* Should be called with conf_lock held */
bnad_setup_tx(struct bnad *bnad, uint tx_id)
	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
	struct bna_intr_info *intr_info =
		&res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
	struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
	struct bna_tx_event_cbfn tx_cbfn;
	unsigned long flags;

	/* Initialize the Tx object configuration */
	tx_config->num_txq = bnad->num_txq_per_tx;
	tx_config->txq_depth = bnad->txq_depth;
	tx_config->tx_type = BNA_TX_T_REGULAR;

	/* Initialize the tx event handlers */
	tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
	tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
	tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
	tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
	tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;

	/* Get BNA's resource requirement for one tx object */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_res_req(bnad->num_txq_per_tx,
		       bnad->txq_depth, res_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Fill Unmap Q memory requirements */
	BNAD_FILL_UNMAPQ_MEM_REQ(
		&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
		bnad->num_txq_per_tx,
		BNAD_TX_UNMAPQ_DEPTH);

	/* Allocate resources */
	err = bnad_tx_res_alloc(bnad, res_info, tx_id);

	/* Ask BNA to create one Tx object, supplying required resources */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Register ISR for the Tx object */
	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
		err = bnad_tx_msix_register(bnad, tx_info,
					    tx_id, bnad->num_txq_per_tx);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad_tx_res_free(bnad, res_info);

/* Setup the rx config for bna_rx_create */
/* bnad decides the configuration */
bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
	rx_config->rx_type = BNA_RX_T_REGULAR;
	rx_config->num_paths = bnad->num_rxp_per_rx;

	if (bnad->num_rxp_per_rx > 1) {
		rx_config->rss_status = BNA_STATUS_T_ENABLED;
		rx_config->rss_config.hash_type =
		rx_config->rss_config.hash_mask =
			bnad->num_rxp_per_rx - 1;
		get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
				 sizeof(rx_config->rss_config.toeplitz_hash_key));
	} else {
		rx_config->rss_status = BNA_STATUS_T_DISABLED;
		memset(&rx_config->rss_config, 0,
		       sizeof(rx_config->rss_config));

	rx_config->rxp_type = BNA_RXP_SLR;
	rx_config->q_depth = bnad->rxq_depth;

	rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;

	rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
/* Called with mutex_lock(&bnad->conf_mutex) held */
bnad_cleanup_rx(struct bnad *bnad, uint rx_id)
	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
	unsigned long flags;
	int dim_timer_del = 0;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	dim_timer_del = bnad_dim_timer_running(bnad);
	if (dim_timer_del)
		clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (dim_timer_del)
		del_timer_sync(&bnad->dim_timer);

	bnad_napi_disable(bnad, rx_id);

	init_completion(&bnad->bnad_completions.rx_comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&bnad->bnad_completions.rx_comp);

	if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
		bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_destroy(rx_info->rx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad_rx_res_free(bnad, res_info);

/* Called with mutex_lock(&bnad->conf_mutex) held */
bnad_setup_rx(struct bnad *bnad, uint rx_id)
	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
	struct bna_intr_info *intr_info =
		&res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
	struct bna_rx_event_cbfn rx_cbfn;
	unsigned long flags;

	/* Initialize the Rx object configuration */
	bnad_init_rx_config(bnad, rx_config);

	/* Initialize the Rx event handlers */
	rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
	rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
	rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
	rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
	rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
	rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;

	/* Get BNA's resource requirement for one Rx object */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_res_req(rx_config, res_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Fill Unmap Q memory requirements */
	BNAD_FILL_UNMAPQ_MEM_REQ(
		&res_info[BNA_RX_RES_MEM_T_UNMAPQ],
		rx_config->num_paths +
			((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
			 rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);
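	/*
	 * Every Rx path appears to get two unmap queues, one per RxQ
	 * (small and large buffers), unless the path type is
	 * BNA_RXP_SINGLE; that is why num_paths is doubled above for the
	 * non-SINGLE types.
	 */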
	/* Allocate resource */
	err = bnad_rx_res_alloc(bnad, res_info, rx_id);

	/* Ask BNA to create one Rx object, supplying required resources */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Register ISR for the Rx object */
	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
		err = bnad_rx_msix_register(bnad, rx_info, rx_id,
					    rx_config->num_paths);

	bnad_napi_enable(bnad, rx_id);

	spin_lock_irqsave(&bnad->bna_lock, flags);

	/* Set up Dynamic Interrupt Moderation Vector */
	if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
		bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);

	/* Enable VLAN filtering only on the default Rx */
	bna_rx_vlanfilter_enable(rx);

	/* Start the DIM timer */
	bnad_dim_timer_start(bnad);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad_cleanup_rx(bnad, rx_id);
/* Called with conf_lock & bnad->bna_lock held */
bnad_tx_coalescing_timeo_set(struct bnad *bnad)
	struct bnad_tx_info *tx_info;

	tx_info = &bnad->tx_info[0];

	bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);

/* Called with conf_lock & bnad->bna_lock held */
bnad_rx_coalescing_timeo_set(struct bnad *bnad)
	struct bnad_rx_info *rx_info;

	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		bna_rx_coalescing_timeo_set(rx_info->rx,
					    bnad->rx_coalescing_timeo);

/*
 * Called with bnad->bna_lock held
 */
bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)

	if (!is_valid_ether_addr(mac_addr))
		return -EADDRNOTAVAIL;

	/* If datapath is down, pretend everything went through */
	if (!bnad->rx_info[0].rx)

	ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
	if (ret != BNA_CB_SUCCESS)
		return -EADDRNOTAVAIL;
/* Should be called with conf_lock held */
bnad_enable_default_bcast(struct bnad *bnad)
	struct bnad_rx_info *rx_info = &bnad->rx_info[0];
	unsigned long flags;

	init_completion(&bnad->bnad_completions.mcast_comp);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
			       bnad_cb_rx_mcast_add);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (ret == BNA_CB_SUCCESS)
		wait_for_completion(&bnad->bnad_completions.mcast_comp);

	if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)

/* Called with bnad_conf_lock() held */
bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
	unsigned long flags;

	if (!bnad->vlan_grp)

	BUG_ON(!(VLAN_N_VID == (BFI_MAX_VLAN + 1)));

	for (vlan_id = 0; vlan_id < VLAN_N_VID; vlan_id++) {
		if (!vlan_group_get_device(bnad->vlan_grp, vlan_id))
			continue;
		spin_lock_irqsave(&bnad->bna_lock, flags);
		bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vlan_id);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
/* Statistics utilities */
bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)

	for (i = 0; i < bnad->num_rx; i++) {
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			if (bnad->rx_info[i].rx_ctrl[j].ccb) {
				stats->rx_packets += bnad->rx_info[i].
					rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
				stats->rx_bytes += bnad->rx_info[i].
					rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
				    bnad->rx_info[i].rx_ctrl[j].ccb->
					stats->rx_packets +=
						bnad->rx_info[i].rx_ctrl[j].
						ccb->rcb[1]->rxq->rx_packets;
					stats->rx_bytes +=
						bnad->rx_info[i].rx_ctrl[j].
						ccb->rcb[1]->rxq->rx_bytes;

	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			if (bnad->tx_info[i].tcb[j]) {
				stats->tx_packets +=
					bnad->tx_info[i].tcb[j]->txq->tx_packets;
				stats->tx_bytes +=
					bnad->tx_info[i].tcb[j]->txq->tx_bytes;
/*
 * Must be called with the bna_lock held.
 */
bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
	struct bfi_ll_stats_mac *mac_stats;

	mac_stats = &bnad->stats.bna_stats->hw_stats->mac_stats;
	stats->rx_errors =
		mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
		mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
		mac_stats->rx_undersize;
	stats->tx_errors = mac_stats->tx_fcs_error +
		mac_stats->tx_undersize;
	stats->rx_dropped = mac_stats->rx_drop;
	stats->tx_dropped = mac_stats->tx_drop;
	stats->multicast = mac_stats->rx_multicast;
	stats->collisions = mac_stats->tx_total_collision;

	stats->rx_length_errors = mac_stats->rx_frame_length_error;

	/* receive ring buffer overflow ?? */

	stats->rx_crc_errors = mac_stats->rx_fcs_error;
	stats->rx_frame_errors = mac_stats->rx_alignment_error;
	/* receiver FIFO overrun */
	bmap = (u64)bnad->stats.bna_stats->rxf_bmap[0] |
		((u64)bnad->stats.bna_stats->rxf_bmap[1] << 32);
	for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
			stats->rx_fifo_errors +=
				bnad->stats.bna_stats->
				hw_stats->rxf_stats[i].frame_drops;
bnad_mbox_irq_sync(struct bnad *bnad)
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bnad->cfg_flags & BNAD_CF_MSIX)
		irq = bnad->msix_table[bnad->msix_num - 1].vector;
	else
		irq = bnad->pcidev->irq;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	synchronize_irq(irq);
/* Utility used by bnad_start_xmit, for doing TSO */
bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)

	/* SKB_GSO_TCPV4 and SKB_GSO_TCPV6 are defined since 2.6.18. */
	BUG_ON(!(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
		 skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6));
	if (skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err) {
			BNAD_UPDATE_CTR(bnad, tso_err);

	/*
	 * For TSO, the TCP checksum field is seeded with pseudo-header sum
	 * excluding the length field.
	 */
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		/* Do we really need these? */

		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
		BNAD_UPDATE_CTR(bnad, tso4);
	} else {
		struct ipv6hdr *ipv6h = ipv6_hdr(skb);

		BUG_ON(!(skb->protocol == htons(ETH_P_IPV6)));
		ipv6h->payload_len = 0;
		tcp_hdr(skb)->check =
			~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
		BNAD_UPDATE_CTR(bnad, tso6);
/*
 * Initialize Q numbers depending on Rx Paths
 * Called with bnad->bna_lock held, because of cfg_flags
 */
bnad_q_num_init(struct bnad *bnad)

	rxps = min((uint)num_online_cpus(),
		   (uint)(BNAD_MAX_RXS * BNAD_MAX_RXPS_PER_RX));

	if (!(bnad->cfg_flags & BNAD_CF_MSIX))
		rxps = 1;	/* INTx */

	bnad->num_rxp_per_rx = rxps;
	bnad->num_txq_per_tx = BNAD_TXQ_NUM;
/*
 * Adjusts the Q numbers, given a number of msix vectors.
 * Gives preference to RSS as opposed to Tx priority queues;
 * in such a case, just use 1 Tx Q.
 * Called with bnad->bna_lock held because of cfg_flags access.
 */
bnad_q_num_adjust(struct bnad *bnad, int msix_vectors)
	bnad->num_txq_per_tx = 1;
	if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
	     bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
	    (bnad->cfg_flags & BNAD_CF_MSIX)) {
		bnad->num_rxp_per_rx = msix_vectors -
			(bnad->num_tx * bnad->num_txq_per_tx) -
			BNAD_MAILBOX_MSIX_VECTORS;
	} else
		bnad->num_rxp_per_rx = 1;
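/*
 * Vector budget: one MSIX vector per TxQ, one per Rx path, plus the
 * mailbox vector; whatever remains after Tx and the mailbox is handed
 * to the Rx paths (and therefore to RSS spreading).
 */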
/* Enable / disable device */
bnad_device_disable(struct bnad *bnad)
	unsigned long flags;

	init_completion(&bnad->bnad_completions.ioc_comp);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_device_disable(&bnad->bna.device, BNA_HARD_CLEANUP);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion(&bnad->bnad_completions.ioc_comp);

bnad_device_enable(struct bnad *bnad)
	unsigned long flags;

	init_completion(&bnad->bnad_completions.ioc_comp);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_device_enable(&bnad->bna.device);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion(&bnad->bnad_completions.ioc_comp);

	if (bnad->bnad_completions.ioc_comp_status)
		err = bnad->bnad_completions.ioc_comp_status;

/* Free BNA resources */
bnad_res_free(struct bnad *bnad)
	struct bna_res_info *res_info = &bnad->res_info[0];

	for (i = 0; i < BNA_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else
			bnad_mbox_irq_free(bnad, &res_info[i].res_u.intr_info);

/* Allocates memory and interrupt resources for BNA */
bnad_res_alloc(struct bnad *bnad)
	struct bna_res_info *res_info = &bnad->res_info[0];

	for (i = 0; i < BNA_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
		else
			err = bnad_mbox_irq_alloc(bnad,
						  &res_info[i].res_u.intr_info);

	bnad_res_free(bnad);

/* Interrupt enable / disable */
bnad_enable_msix(struct bnad *bnad)
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (bnad->msix_table)
		return;

	bnad->msix_table =
		kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);

	if (!bnad->msix_table)

	for (i = 0; i < bnad->msix_num; i++)
		bnad->msix_table[i].entry = i;

	ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);

	/* Not enough MSI-X vectors. */
	if (ret > 0) {
		spin_lock_irqsave(&bnad->bna_lock, flags);
		/* ret = #of vectors that we got */
		bnad_q_num_adjust(bnad, ret);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);

		bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx)
			+ (bnad->num_rx
			   * bnad->num_rxp_per_rx) +
			BNAD_MAILBOX_MSIX_VECTORS;
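		/*
		 * Under the old pci_enable_msix() contract a positive
		 * return value is the number of vectors that could have
		 * been allocated, so the queue counts are shrunk to fit
		 * and the call retried once before giving up on MSIX.
		 */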
		/* Try once more with adjusted numbers */
		/* If this fails, fall back to INTx */
		ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,

	kfree(bnad->msix_table);
	bnad->msix_table = NULL;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad->cfg_flags &= ~BNAD_CF_MSIX;
	bnad_q_num_init(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

bnad_disable_msix(struct bnad *bnad)
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	cfg_flags = bnad->cfg_flags;
	if (bnad->cfg_flags & BNAD_CF_MSIX)
		bnad->cfg_flags &= ~BNAD_CF_MSIX;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (cfg_flags & BNAD_CF_MSIX) {
		pci_disable_msix(bnad->pcidev);
		kfree(bnad->msix_table);
		bnad->msix_table = NULL;
2343 bnad_open(struct net_device *netdev)
2346 struct bnad *bnad = netdev_priv(netdev);
2347 struct bna_pause_config pause_config;
2349 unsigned long flags;
2351 mutex_lock(&bnad->conf_mutex);
2354 err = bnad_setup_tx(bnad, 0);
2359 err = bnad_setup_rx(bnad, 0);
2364 pause_config.tx_pause = 0;
2365 pause_config.rx_pause = 0;
2367 mtu = ETH_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
2369 spin_lock_irqsave(&bnad->bna_lock, flags);
2370 bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
2371 bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
2372 bna_port_enable(&bnad->bna.port);
2373 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2375 /* Enable broadcast */
2376 bnad_enable_default_bcast(bnad);
2378 /* Restore VLANs, if any */
2379 bnad_restore_vlans(bnad, 0);
2381 /* Set the UCAST address */
2382 spin_lock_irqsave(&bnad->bna_lock, flags);
2383 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2384 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2386 /* Start the stats timer */
2387 bnad_stats_timer_start(bnad);
2389 mutex_unlock(&bnad->conf_mutex);
2394 bnad_cleanup_tx(bnad, 0);
2397 mutex_unlock(&bnad->conf_mutex);
2402 bnad_stop(struct net_device *netdev)
2404 struct bnad *bnad = netdev_priv(netdev);
2405 unsigned long flags;
2407 mutex_lock(&bnad->conf_mutex);
2409 /* Stop the stats timer */
2410 bnad_stats_timer_stop(bnad);
2412 init_completion(&bnad->bnad_completions.port_comp);
2414 spin_lock_irqsave(&bnad->bna_lock, flags);
2415 bna_port_disable(&bnad->bna.port, BNA_HARD_CLEANUP,
2416 bnad_cb_port_disabled);
2417 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2419 wait_for_completion(&bnad->bnad_completions.port_comp);
2421 bnad_cleanup_tx(bnad, 0);
2422 bnad_cleanup_rx(bnad, 0);
2424 /* Synchronize mailbox IRQ */
2425 bnad_mbox_irq_sync(bnad);
2427 mutex_unlock(&bnad->conf_mutex);
/*
 * bnad_start_xmit : Netdev entry point for Transmit
 *		     Called under lock held by net_device
 */
bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
	struct bnad *bnad = netdev_priv(netdev);

	u16 txq_prod, vlan_tag = 0;
	u32 unmap_prod, wis, wis_used, wi_range;
	u32 vectors, vect_id, i, acked;

	struct bnad_tx_info *tx_info;
	struct bna_tcb *tcb;
	struct bnad_unmap_q *unmap_q;
	dma_addr_t dma_addr;
	struct bna_txq_entry *txqent;
	bna_txq_wi_ctrl_flag_t flags;

	if (unlikely
	    (skb->len <= ETH_HLEN || skb->len > BFI_TX_MAX_DATA_PER_PKT)) {
		return NETDEV_TX_OK;

	tx_info = &bnad->tx_info[tx_id];
	tcb = tx_info->tcb[tx_id];
	unmap_q = tcb->unmap_q;

	/*
	 * Takes care of the Tx that is scheduled between clearing the flag
	 * and the netif_stop_queue() call.
	 */
	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
		return NETDEV_TX_OK;

	vectors = 1 + skb_shinfo(skb)->nr_frags;
	if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) {
		return NETDEV_TX_OK;

	wis = BNA_TXQ_WI_NEEDED(vectors);	/* 4 vectors per work item */
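	/*
	 * BNA_TXQ_WI_NEEDED() rounds up at 4 vectors per work item, e.g.
	 * a frame with 6 vectors (header + 5 frags) needs 2 WIs, the
	 * second being the BNA_TXQ_WI_EXTENSION entry built in the frag
	 * loop below.
	 */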
2484 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2485 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2486 if ((u16) (*tcb->hw_consumer_index) !=
2487 tcb->consumer_index &&
2488 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2489 acked = bnad_free_txbufs(bnad, tcb);
2490 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2491 bna_ib_ack(tcb->i_dbell, acked);
2492 smp_mb__before_clear_bit();
2493 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2495 netif_stop_queue(netdev);
2496 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2501 * Check again to deal with race condition between
2502 * netif_stop_queue here, and netif_wake_queue in
2503 * interrupt handler which is not inside netif tx lock.
2506 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2507 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2508 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2509 return NETDEV_TX_BUSY;
2511 netif_wake_queue(netdev);
2512 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2516 unmap_prod = unmap_q->producer_index;
2521 txq_prod = tcb->producer_index;
2522 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
2523 BUG_ON(!(wi_range <= tcb->q_depth));
2524 txqent->hdr.wi.reserved = 0;
2525 txqent->hdr.wi.num_vectors = vectors;
2526 txqent->hdr.wi.opcode =
2527 htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
	if (vlan_tx_tag_present(skb)) {
		vlan_tag = (u16) vlan_tx_tag_get(skb);
		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
	}
	if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
		vlan_tag =
			(tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
	}

	txqent->hdr.wi.vlan_tag = htons(vlan_tag);
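	/*
	 * skb_is_gso() returns the MSS (gso_size), so it serves both as
	 * the branch condition and as the value programmed into lso_mss;
	 * IP and TCP checksums are offloaded for the segmented frames.
	 */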
	if (skb_is_gso(skb)) {
		err = bnad_tso_prepare(bnad, skb);
		if (err) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		txqent->hdr.wi.lso_mss = htons(skb_is_gso(skb));
		flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
		txqent->hdr.wi.l4_hdr_size_n_offset =
			htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
			      (tcp_hdrlen(skb) >> 2,
			       skb_transport_offset(skb)));
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 proto = 0;

		txqent->hdr.wi.lso_mss = 0;

		if (skb->protocol == htons(ETH_P_IP))
			proto = ip_hdr(skb)->protocol;
		else if (skb->protocol == htons(ETH_P_IPV6)) {
			/* nexthdr may not be TCP immediately. */
			proto = ipv6_hdr(skb)->nexthdr;
		}
		if (proto == IPPROTO_TCP) {
			flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
			txqent->hdr.wi.l4_hdr_size_n_offset =
				htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
				      (0, skb_transport_offset(skb)));

			BNAD_UPDATE_CTR(bnad, tcpcsum_offload);

			BUG_ON(!(skb_headlen(skb) >=
				 skb_transport_offset(skb) + tcp_hdrlen(skb)));
		} else if (proto == IPPROTO_UDP) {
			flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
			txqent->hdr.wi.l4_hdr_size_n_offset =
				htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
				      (0, skb_transport_offset(skb)));

			BNAD_UPDATE_CTR(bnad, udpcsum_offload);

			BUG_ON(!(skb_headlen(skb) >=
				 skb_transport_offset(skb) +
				 sizeof(struct udphdr)));
		} else {
			/* Neither TCP nor UDP: fall back to software csum */
			err = skb_checksum_help(skb);
			BNAD_UPDATE_CTR(bnad, csum_help);
			if (err) {
				dev_kfree_skb(skb);
				BNAD_UPDATE_CTR(bnad, csum_help_err);
				return NETDEV_TX_OK;
			}
		}
	} else {
		txqent->hdr.wi.lso_mss = 0;
		txqent->hdr.wi.l4_hdr_size_n_offset = 0;
	}
	txqent->hdr.wi.flags = htons(flags);

	txqent->hdr.wi.frame_length = htonl(skb->len);

	unmap_q->unmap_array[unmap_prod].skb = skb;
	BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
	txqent->vector[vect_id].length = htons(skb_headlen(skb));
	dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
				  skb_headlen(skb), DMA_TO_DEVICE);
	dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
			   dma_addr);

	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
	BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
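	/*
	 * Map each page fragment into its own Tx vector. Once the four
	 * vectors of a work item are used, continuation vectors spill
	 * into extension work items (BNA_TXQ_WI_EXTENSION).
	 */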
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;

		if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
			vect_id = 0;
			if (--wi_range)
				txqent++;
			else {
				BNA_QE_INDX_ADD(txq_prod, wis_used,
						tcb->q_depth);
				wis_used = 0;
				BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
						     txqent, wi_range);
				BUG_ON(!(wi_range <= tcb->q_depth));
			}
			wis_used++;
			txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
		}

		BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
		txqent->vector[vect_id].length = htons(size);
		dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page,
					frag->page_offset, size, DMA_TO_DEVICE);
		dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
				   dma_addr);
		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
	}
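	/*
	 * Publish the updated producer indices before re-checking
	 * TX_STARTED and ringing the doorbell; the smp_mb() below
	 * presumably pairs with the stop path that clears the flag.
	 */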
	unmap_q->producer_index = unmap_prod;
	BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
	tcb->producer_index = txq_prod;

	smp_mb();

	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
		return NETDEV_TX_OK;

	bna_txq_prod_indx_doorbell(tcb);

	if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
		tasklet_schedule(&bnad->tx_free_tasklet);

	return NETDEV_TX_OK;
}
/*
 * Uses spin_lock to synchronize reading of stats structures, which
 * are written by BNA under the same lock.
 */
static struct rtnl_link_stats64 *
bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	bnad_netdev_qstats_fill(bnad, stats);
	bnad_netdev_hwstats_fill(bnad, stats);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return stats;
}
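/*
 * Rx mode changes are passed to bna_rx_mode_set() as a (new_mask,
 * valid_mask) pair: presumably valid_mask selects which mode bits may
 * change and new_mask supplies their new values, so promiscuous and
 * allmulti can be toggled independently.
 */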
static void
bnad_set_rx_mode(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	u32 new_mask, valid_mask;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	new_mask = valid_mask = 0;

	if (netdev->flags & IFF_PROMISC) {
		if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
			new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
			valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
			bnad->cfg_flags |= BNAD_CF_PROMISC;
		}
	} else {
		if (bnad->cfg_flags & BNAD_CF_PROMISC) {
			new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
			valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
			bnad->cfg_flags &= ~BNAD_CF_PROMISC;
		}
	}

	if (netdev->flags & IFF_ALLMULTI) {
		if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
			new_mask |= BNA_RXMODE_ALLMULTI;
			valid_mask |= BNA_RXMODE_ALLMULTI;
			bnad->cfg_flags |= BNAD_CF_ALLMULTI;
		}
	} else {
		if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
			new_mask &= ~BNA_RXMODE_ALLMULTI;
			valid_mask |= BNA_RXMODE_ALLMULTI;
			bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
		}
	}

	bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);

	if (!netdev_mc_empty(netdev)) {
		u8 *mcaddr_list;
		int mc_count = netdev_mc_count(netdev);

		/* Index 0 holds the broadcast address */
		mcaddr_list =
			kzalloc((mc_count + 1) * ETH_ALEN,
				GFP_ATOMIC);
		if (!mcaddr_list)
			goto unlock;

		memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);

		/* Copy rest of the MC addresses */
		bnad_netdev_mc_list_get(netdev, mcaddr_list);

		bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
				     mcaddr_list, NULL);

		/* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
		kfree(mcaddr_list);
	}
unlock:
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
/*
 * bna_lock is used to sync writes to netdev->dev_addr.
 * conf_lock cannot be used since this call may be made
 * in a non-blocking context.
 */
static int
bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
{
	int err;
	struct bnad *bnad = netdev_priv(netdev);
	struct sockaddr *sa = (struct sockaddr *)mac_addr;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	err = bnad_mac_addr_set_locked(bnad, sa->sa_data);

	if (!err)
		memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return err;
}
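/*
 * The MTU programmed into the hardware is the on-wire frame size:
 * netdev MTU plus Ethernet header and FCS, e.g. 1500 + 14 + 4 = 1518.
 */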
static int
bnad_change_mtu(struct net_device *netdev, int new_mtu)
{
	int mtu, err = 0;
	unsigned long flags;

	struct bnad *bnad = netdev_priv(netdev);

	if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
		return -EINVAL;

	mutex_lock(&bnad->conf_mutex);

	netdev->mtu = new_mtu;

	mtu = ETH_HLEN + new_mtu + ETH_FCS_LEN;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
	return err;
}
static void
bnad_vlan_rx_register(struct net_device *netdev,
		      struct vlan_group *vlan_grp)
{
	struct bnad *bnad = netdev_priv(netdev);

	mutex_lock(&bnad->conf_mutex);
	bnad->vlan_grp = vlan_grp;
	mutex_unlock(&bnad->conf_mutex);
}
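/*
 * The VLAN add/kill entry points check rx_info[0].rx first, presumably
 * because they can be invoked before bnad_open() has created the Rx
 * object.
 */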
static void
bnad_vlan_rx_add_vid(struct net_device *netdev,
		     unsigned short vid)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	if (!bnad->rx_info[0].rx)
		return;

	mutex_lock(&bnad->conf_mutex);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
}
static void
bnad_vlan_rx_kill_vid(struct net_device *netdev,
		      unsigned short vid)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	if (!bnad->rx_info[0].rx)
		return;

	mutex_lock(&bnad->conf_mutex);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void
bnad_netpoll(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	u32 curr_mask;
	int i, j;

	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
		bna_intx_disable(&bnad->bna, curr_mask);
		bnad_isr(bnad->pcidev->irq, netdev);
		bna_intx_enable(&bnad->bna, curr_mask);
	} else {
		for (i = 0; i < bnad->num_rx; i++) {
			rx_info = &bnad->rx_info[i];
			if (!rx_info->rx)
				continue;
			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
				rx_ctrl = &rx_info->rx_ctrl[j];
				if (rx_ctrl->ccb) {
					bnad_disable_rx_irq(bnad,
							    rx_ctrl->ccb);
					bnad_netif_rx_schedule_poll(bnad,
							    rx_ctrl->ccb);
				}
			}
		}
	}
}
#endif
static const struct net_device_ops bnad_netdev_ops = {
	.ndo_open		= bnad_open,
	.ndo_stop		= bnad_stop,
	.ndo_start_xmit		= bnad_start_xmit,
	.ndo_get_stats64	= bnad_get_stats64,
	.ndo_set_rx_mode	= bnad_set_rx_mode,
	.ndo_set_multicast_list	= bnad_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnad_set_mac_address,
	.ndo_change_mtu		= bnad_change_mtu,
	.ndo_vlan_rx_register	= bnad_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= bnad_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= bnad_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bnad_netpoll
#endif
};
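/*
 * Note that hw_features holds the offloads the user may toggle through
 * ethtool; VLAN Rx stripping/filtering are added only to features, so
 * they stay permanently enabled.
 */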
static void
bnad_netdev_init(struct bnad *bnad, bool using_dac)
{
	struct net_device *netdev = bnad->netdev;

	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX;

	netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->mem_start = bnad->mmio_start;
	netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;

	netdev->netdev_ops = &bnad_netdev_ops;
	bnad_set_ethtool_ops(netdev);
}
/*
 * 1. Initialize the bnad structure
 * 2. Setup netdev pointer in pci_dev
 * 3. Initialize Tx free tasklet
 * 4. Initialize no. of TxQs, CQs & MSIX vectors
 */
static int
bnad_init(struct bnad *bnad,
	  struct pci_dev *pdev, struct net_device *netdev)
{
	unsigned long flags;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);

	bnad->netdev = netdev;
	bnad->pcidev = pdev;
	bnad->mmio_start = pci_resource_start(pdev, 0);
	bnad->mmio_len = pci_resource_len(pdev, 0);
	bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
	if (!bnad->bar0) {
		dev_err(&pdev->dev, "ioremap for bar0 failed\n");
		pci_set_drvdata(pdev, NULL);
		return -ENOMEM;
	}
	pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
		(unsigned long long) bnad->mmio_len);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!bnad_msix_disable)
		bnad->cfg_flags = BNAD_CF_MSIX;

	bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;

	bnad_q_num_init(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
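	/*
	 * One MSI-X vector is needed per TxQ and per RxP, plus the
	 * mailbox vector(s); e.g. assuming a single Tx set with one TxQ
	 * and a single Rx set with one RxP, msix_num below is
	 * 1 + 1 + BNAD_MAILBOX_MSIX_VECTORS.
	 */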
	bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
		(bnad->num_rx * bnad->num_rxp_per_rx) +
		BNAD_MAILBOX_MSIX_VECTORS;

	bnad->txq_depth = BNAD_TXQ_DEPTH;
	bnad->rxq_depth = BNAD_RXQ_DEPTH;

	bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
	bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;

	tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
		     (unsigned long)bnad);

	return 0;
}
/*
 * Must be called after bnad_pci_uninit()
 * so that iounmap() and pci_set_drvdata(NULL)
 * happen only after PCI uninitialization.
 */
static void
bnad_uninit(struct bnad *bnad)
{
	if (bnad->bar0)
		iounmap(bnad->bar0);
	pci_set_drvdata(bnad->pcidev, NULL);
}
/*
 * Initialize locks
 *	a) Per-device mutex used for serializing configuration
 *	   changes from OS interface
 *	b) spin lock used to protect bna state machine
 */
static void
bnad_lock_init(struct bnad *bnad)
{
	spin_lock_init(&bnad->bna_lock);
	mutex_init(&bnad->conf_mutex);
}

static void
bnad_lock_uninit(struct bnad *bnad)
{
	mutex_destroy(&bnad->conf_mutex);
}
/* PCI Initialization */
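/*
 * DMA setup tries a 64-bit mask first and reports DAC capability via
 * *using_dac; if that fails, it falls back to 32-bit streaming and
 * coherent masks.
 */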
static int
bnad_pci_init(struct bnad *bnad,
	      struct pci_dev *pdev, bool *using_dac)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;
	err = pci_request_regions(pdev, BNAD_NAME);
	if (err)
		goto disable_device;
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		*using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err)
				goto release_regions;
		}
		*using_dac = 0;
	}

	pci_set_master(pdev);
	return 0;

release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);

	return err;
}

static void
bnad_pci_uninit(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
static int __devinit
bnad_pci_probe(struct pci_dev *pdev,
	       const struct pci_device_id *pcidev_id)
{
	bool using_dac = false;
	int err;
	struct bnad *bnad;
	struct bna *bna;
	struct net_device *netdev;
	struct bfa_pcidev pcidev_info;
	unsigned long flags;

	pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
		pdev, pcidev_id, PCI_FUNC(pdev->devfn));

	mutex_lock(&bnad_fwimg_mutex);
	if (!cna_get_firmware_buf(pdev)) {
		mutex_unlock(&bnad_fwimg_mutex);
		pr_warn("Failed to load Firmware Image!\n");
		return -ENODEV;
	}
	mutex_unlock(&bnad_fwimg_mutex);
	/*
	 * Allocates sizeof(struct net_device) + sizeof(struct bnad);
	 * bnad = netdev_priv(netdev)
	 */
	netdev = alloc_etherdev(sizeof(struct bnad));
	if (!netdev) {
		dev_err(&pdev->dev, "alloc_etherdev failed\n");
		err = -ENOMEM;
		return err;
	}
	bnad = netdev_priv(netdev);

	/*
	 * PCI initialization
	 *	Output : using_dac = 1 for 64-bit DMA
	 *			   = 0 for 32-bit DMA
	 */
	err = bnad_pci_init(bnad, pdev, &using_dac);
	if (err)
		goto free_netdev;

	bnad_lock_init(bnad);
	/*
	 * Initialize bnad structure
	 * Setup relation between pci_dev & netdev
	 * Init Tx free tasklet
	 */
	err = bnad_init(bnad, pdev, netdev);
	if (err)
		goto pci_uninit;

	/* Initialize netdev structure, set up ethtool ops */
	bnad_netdev_init(bnad, using_dac);
3111 /* Set link to down state */
3112 netif_carrier_off(netdev);
3114 bnad_enable_msix(bnad);
3116 /* Get resource requirement form bna */
3117 bna_res_req(&bnad->res_info[0]);
3119 /* Allocate resources from bna */
3120 err = bnad_res_alloc(bnad);
3126 /* Setup pcidev_info for bna_init() */
3127 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3128 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3129 pcidev_info.device_id = bnad->pcidev->device;
3130 pcidev_info.pci_bar_kva = bnad->bar0;
3132 mutex_lock(&bnad->conf_mutex);
3134 spin_lock_irqsave(&bnad->bna_lock, flags);
3135 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3136 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3138 bnad->stats.bna_stats = &bna->stats;
3141 setup_timer(&bnad->bna.device.ioc.ioc_timer, bnad_ioc_timeout,
3142 ((unsigned long)bnad));
3143 setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check,
3144 ((unsigned long)bnad));
3145 setup_timer(&bnad->bna.device.ioc.iocpf_timer, bnad_iocpf_timeout,
3146 ((unsigned long)bnad));
3147 setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_iocpf_sem_timeout,
3148 ((unsigned long)bnad));
3150 /* Now start the timer before calling IOC */
3151 mod_timer(&bnad->bna.device.ioc.iocpf_timer,
3152 jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
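	/*
	 * The iocpf timer is started before enabling the device,
	 * presumably because the IOC firmware handshake below is driven
	 * from timer context.
	 */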
	/*
	 * Start the chip.
	 * Don't care even if err != 0, bna state machine will
	 * deal with it.
	 */
	err = bnad_device_enable(bnad);

	/* Get the burnt-in mac */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_port_mac_get(&bna->port, &bnad->perm_addr);
	bnad_set_netdev_perm_addr(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);

	/* Finally, register with the net_device layer */
	err = register_netdev(netdev);
	if (err) {
		pr_err("BNA : Registering with netdev failed\n");
		goto disable_device;
	}

	return 0;
disable_device:
	mutex_lock(&bnad->conf_mutex);
	bnad_device_disable(bnad);
	del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
	del_timer_sync(&bnad->bna.device.ioc.sem_timer);
	del_timer_sync(&bnad->bna.device.ioc.hb_timer);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_uninit(bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	mutex_unlock(&bnad->conf_mutex);

	bnad_res_free(bnad);
	bnad_disable_msix(bnad);
pci_uninit:
	bnad_pci_uninit(pdev);
	bnad_lock_uninit(bnad);
	bnad_uninit(bnad);
free_netdev:
	free_netdev(netdev);

	return err;
}
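/*
 * Removal mirrors the probe path in reverse: unregister the netdev
 * first so no new I/O arrives, then disable the device, kill the
 * timers and free BNA and PCI resources.
 */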
static void __devexit
bnad_pci_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnad *bnad;
	struct bna *bna;
	unsigned long flags;

	if (!netdev)
		return;

	pr_info("%s bnad_pci_remove\n", netdev->name);
	bnad = netdev_priv(netdev);
	bna = &bnad->bna;

	unregister_netdev(netdev);

	mutex_lock(&bnad->conf_mutex);
	bnad_device_disable(bnad);
	del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
	del_timer_sync(&bnad->bna.device.ioc.sem_timer);
	del_timer_sync(&bnad->bna.device.ioc.hb_timer);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_uninit(bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	mutex_unlock(&bnad->conf_mutex);

	bnad_res_free(bnad);
	bnad_disable_msix(bnad);
	bnad_pci_uninit(pdev);
	bnad_lock_uninit(bnad);
	bnad_uninit(bnad);
	free_netdev(netdev);
}
static const struct pci_device_id bnad_pci_id_table[] = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
			   PCI_DEVICE_ID_BROCADE_CT),
		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
		.class_mask = 0xffff00
	},
	{0, }
};

MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
static struct pci_driver bnad_pci_driver = {
	.name = BNAD_NAME,
	.id_table = bnad_pci_id_table,
	.probe = bnad_pci_probe,
	.remove = __devexit_p(bnad_pci_remove),
};
static int __init
bnad_module_init(void)
{
	int err;

	pr_info("Brocade 10G Ethernet driver\n");

	bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);

	err = pci_register_driver(&bnad_pci_driver);
	if (err < 0) {
		pr_err("bna : PCI registration failed in module init "
		       "(%d)\n", err);
		return err;
	}

	return err;
}
static void __exit
bnad_module_exit(void)
{
	pci_unregister_driver(&bnad_pci_driver);

	if (bfi_fw)
		release_firmware(bfi_fw);
}
module_init(bnad_module_init);
module_exit(bnad_module_exit);

MODULE_AUTHOR("Brocade");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
MODULE_VERSION(BNAD_VERSION);
MODULE_FIRMWARE(CNA_FW_FILE_CT);