drivers/net/ethernet/brocade/bna/bnad.c
1 /*
2  * Linux network driver for Brocade Converged Network Adapter.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License (GPL) Version 2 as
6  * published by the Free Software Foundation
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  */
13 /*
14  * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15  * All rights reserved
16  * www.brocade.com
17  */
18 #include <linux/bitops.h>
19 #include <linux/netdevice.h>
20 #include <linux/skbuff.h>
21 #include <linux/etherdevice.h>
22 #include <linux/in.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_vlan.h>
25 #include <linux/if_ether.h>
26 #include <linux/ip.h>
27 #include <linux/prefetch.h>
28 #include <linux/module.h>
29
30 #include "bnad.h"
31 #include "bna.h"
32 #include "cna.h"
33
34 static DEFINE_MUTEX(bnad_fwimg_mutex);
35
36 /*
37  * Module params
38  */
39 static uint bnad_msix_disable;
40 module_param(bnad_msix_disable, uint, 0444);
41 MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
42
43 static uint bnad_ioc_auto_recover = 1;
44 module_param(bnad_ioc_auto_recover, uint, 0444);
45 MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
46
47 static uint bna_debugfs_enable = 1;
48 module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR);
49 MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
50                  " Range[false:0|true:1]");
51
52 /*
53  * Global variables
54  */
55 static u32 bnad_rxqs_per_cq = 2;
56 static u32 bna_id;
57 static struct mutex bnad_list_mutex;
58 static LIST_HEAD(bnad_list);
59 static const u8 bnad_bcast_addr[] =  {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
60
61 /*
62  * Local MACROS
63  */
64 #define BNAD_GET_MBOX_IRQ(_bnad)                                \
65         (((_bnad)->cfg_flags & BNAD_CF_MSIX) ?                  \
66          ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
67          ((_bnad)->pcidev->irq))
68
69 #define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size)        \
70 do {                                                            \
71         (_res_info)->res_type = BNA_RES_T_MEM;                  \
72         (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;   \
73         (_res_info)->res_u.mem_info.num = (_num);               \
74         (_res_info)->res_u.mem_info.len = (_size);              \
75 } while (0)
76
77 static void
78 bnad_add_to_list(struct bnad *bnad)
79 {
80         mutex_lock(&bnad_list_mutex);
81         list_add_tail(&bnad->list_entry, &bnad_list);
82         bnad->id = bna_id++;
83         mutex_unlock(&bnad_list_mutex);
84 }
85
86 static void
87 bnad_remove_from_list(struct bnad *bnad)
88 {
89         mutex_lock(&bnad_list_mutex);
90         list_del(&bnad->list_entry);
91         mutex_unlock(&bnad_list_mutex);
92 }
93
94 /*
95  * Reinitialize completions in CQ, once Rx is taken down
96  */
97 static void
98 bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
99 {
100         struct bna_cq_entry *cmpl;
101         int i;
102
103         for (i = 0; i < ccb->q_depth; i++) {
104                 cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i];
105                 cmpl->valid = 0;
106         }
107 }
108
109 /* Tx Datapath functions */
110
111
112 /* Caller should ensure that the entry at unmap_q[index] is valid */
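/*
 * Unmaps one transmitted skb starting at unmap_q[index]: the linear
 * (head) area first, then each page fragment, moving to the next work
 * item whenever BFI_TX_MAX_VECTORS_PER_WI vectors have been consumed.
 * Returns the queue index following the skb's last work item.
 */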
113 static u32
114 bnad_tx_buff_unmap(struct bnad *bnad,
115                               struct bnad_tx_unmap *unmap_q,
116                               u32 q_depth, u32 index)
117 {
118         struct bnad_tx_unmap *unmap;
119         struct sk_buff *skb;
120         int vector, nvecs;
121
122         unmap = &unmap_q[index];
123         nvecs = unmap->nvecs;
124
125         skb = unmap->skb;
126         unmap->skb = NULL;
127         unmap->nvecs = 0;
128         dma_unmap_single(&bnad->pcidev->dev,
129                 dma_unmap_addr(&unmap->vectors[0], dma_addr),
130                 skb_headlen(skb), DMA_TO_DEVICE);
131         dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
132         nvecs--;
133
134         vector = 0;
135         while (nvecs) {
136                 vector++;
137                 if (vector == BFI_TX_MAX_VECTORS_PER_WI) {
138                         vector = 0;
139                         BNA_QE_INDX_INC(index, q_depth);
140                         unmap = &unmap_q[index];
141                 }
142
143                 dma_unmap_page(&bnad->pcidev->dev,
144                         dma_unmap_addr(&unmap->vectors[vector], dma_addr),
145                         skb_shinfo(skb)->frags[nvecs].size, DMA_TO_DEVICE);
146                 dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
147                 nvecs--;
148         }
149
150         BNA_QE_INDX_INC(index, q_depth);
151
152         return index;
153 }
154
155 /*
156  * Frees all pending Tx Bufs
157  * At this point no activity is expected on the Q,
158  * so DMA unmap & freeing is fine.
159  */
160 static void
161 bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
162 {
163         struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
164         struct sk_buff *skb;
165         int i;
166
167         for (i = 0; i < tcb->q_depth; i++) {
168                 skb = unmap_q[i].skb;
169                 if (!skb)
170                         continue;
171                 bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);
172
173                 dev_kfree_skb_any(skb);
174         }
175 }
176
177 /*
178  * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
179  * Can be called in a) Interrupt context
180  *                  b) Sending context
181  */
182 static u32
183 bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
184 {
185         u32 sent_packets = 0, sent_bytes = 0;
186         u32 wis, unmap_wis, hw_cons, cons, q_depth;
187         struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
188         struct bnad_tx_unmap *unmap;
189         struct sk_buff *skb;
190
191         /* Just return if TX is stopped */
192         if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
193                 return 0;
194
195         hw_cons = *(tcb->hw_consumer_index);
196         cons = tcb->consumer_index;
197         q_depth = tcb->q_depth;
198
199         wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
200         BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
201
202         while (wis) {
203                 unmap = &unmap_q[cons];
204
205                 skb = unmap->skb;
206
207                 sent_packets++;
208                 sent_bytes += skb->len;
209
210                 unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs);
211                 wis -= unmap_wis;
212
213                 cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
214                 dev_kfree_skb_any(skb);
215         }
216
217         /* Update consumer pointers. */
218         tcb->consumer_index = hw_cons;
219
220         tcb->txq->tx_packets += sent_packets;
221         tcb->txq->tx_bytes += sent_bytes;
222
223         return sent_packets;
224 }
225
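/*
 * Reclaims completed Tx buffers for one TxQ.  BNAD_TXQ_FREE_SENT keeps
 * this from racing with the cleanup worker; if enough entries were
 * freed the netdev queue is woken, and the completed count is acked
 * to the interrupt block doorbell.
 */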
226 static u32
227 bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
228 {
229         struct net_device *netdev = bnad->netdev;
230         u32 sent = 0;
231
232         if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
233                 return 0;
234
235         sent = bnad_txcmpl_process(bnad, tcb);
236         if (sent) {
237                 if (netif_queue_stopped(netdev) &&
238                     netif_carrier_ok(netdev) &&
239                     BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
240                                     BNAD_NETIF_WAKE_THRESHOLD) {
241                         if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
242                                 netif_wake_queue(netdev);
243                                 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
244                         }
245                 }
246         }
247
248         if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
249                 bna_ib_ack(tcb->i_dbell, sent);
250
251         smp_mb__before_clear_bit();
252         clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
253
254         return sent;
255 }
256
257 /* MSIX Tx Completion Handler */
258 static irqreturn_t
259 bnad_msix_tx(int irq, void *data)
260 {
261         struct bna_tcb *tcb = (struct bna_tcb *)data;
262         struct bnad *bnad = tcb->bnad;
263
264         bnad_tx_complete(bnad, tcb);
265
266         return IRQ_HANDLED;
267 }
268
269 static inline void
270 bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
271 {
272         struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
273
274         unmap_q->reuse_pi = -1;
275         unmap_q->alloc_order = -1;
276         unmap_q->map_size = 0;
277         unmap_q->type = BNAD_RXBUF_NONE;
278 }
279
280 /* Default is page-based allocation. Multi-buffer support - TBD */
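/*
 * Small RxQs and multi-buffer RxQs get one buffer_size mapping per
 * page; the regular large-buffer RxQ carves a higher-order page into
 * map_size chunks (2048 bytes, or the full PAGE_SIZE << order
 * allocation when buffer_size exceeds 2048).
 */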
281 static int
282 bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
283 {
284         struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
285         int order;
286
287         bnad_rxq_alloc_uninit(bnad, rcb);
288
289         order = get_order(rcb->rxq->buffer_size);
290
291         unmap_q->type = BNAD_RXBUF_PAGE;
292
293         if (bna_is_small_rxq(rcb->id)) {
294                 unmap_q->alloc_order = 0;
295                 unmap_q->map_size = rcb->rxq->buffer_size;
296         } else {
297                 if (rcb->rxq->multi_buffer) {
298                         unmap_q->alloc_order = 0;
299                         unmap_q->map_size = rcb->rxq->buffer_size;
300                         unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
301                 } else {
302                         unmap_q->alloc_order = order;
303                         unmap_q->map_size =
304                                 (rcb->rxq->buffer_size > 2048) ?
305                                 PAGE_SIZE << order : 2048;
306                 }
307         }
308
309         BUG_ON(((PAGE_SIZE << order) % unmap_q->map_size));
310
311         return 0;
312 }
313
314 static inline void
315 bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
316 {
317         if (!unmap->page)
318                 return;
319
320         dma_unmap_page(&bnad->pcidev->dev,
321                         dma_unmap_addr(&unmap->vector, dma_addr),
322                         unmap->vector.len, DMA_FROM_DEVICE);
323         put_page(unmap->page);
324         unmap->page = NULL;
325         dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
326         unmap->vector.len = 0;
327 }
328
329 static inline void
330 bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
331 {
332         if (!unmap->skb)
333                 return;
334
335         dma_unmap_single(&bnad->pcidev->dev,
336                         dma_unmap_addr(&unmap->vector, dma_addr),
337                         unmap->vector.len, DMA_FROM_DEVICE);
338         dev_kfree_skb_any(unmap->skb);
339         unmap->skb = NULL;
340         dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
341         unmap->vector.len = 0;
342 }
343
344 static void
345 bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
346 {
347         struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
348         int i;
349
350         for (i = 0; i < rcb->q_depth; i++) {
351                 struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
352
353                 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
354                         bnad_rxq_cleanup_skb(bnad, unmap);
355                 else
356                         bnad_rxq_cleanup_page(bnad, unmap);
357         }
358         bnad_rxq_alloc_uninit(bnad, rcb);
359 }
360
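/*
 * Refills the RxQ with page-backed buffers.  Buffers are carved from
 * (possibly higher-order) pages in map_size chunks: reuse_pi remembers
 * the entry whose page still has room, so the next buffer shares that
 * page via get_page() instead of allocating a fresh one.  Newly posted
 * entries are announced through the producer-index doorbell.
 */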
361 static u32
362 bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
363 {
364         u32 alloced, prod, q_depth;
365         struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
366         struct bnad_rx_unmap *unmap, *prev;
367         struct bna_rxq_entry *rxent;
368         struct page *page;
369         u32 page_offset, alloc_size;
370         dma_addr_t dma_addr;
371
372         prod = rcb->producer_index;
373         q_depth = rcb->q_depth;
374
375         alloc_size = PAGE_SIZE << unmap_q->alloc_order;
376         alloced = 0;
377
378         while (nalloc--) {
379                 unmap = &unmap_q->unmap[prod];
380
381                 if (unmap_q->reuse_pi < 0) {
382                         page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
383                                         unmap_q->alloc_order);
384                         page_offset = 0;
385                 } else {
386                         prev = &unmap_q->unmap[unmap_q->reuse_pi];
387                         page = prev->page;
388                         page_offset = prev->page_offset + unmap_q->map_size;
389                         get_page(page);
390                 }
391
392                 if (unlikely(!page)) {
393                         BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
394                         rcb->rxq->rxbuf_alloc_failed++;
395                         goto finishing;
396                 }
397
398                 dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
399                                 unmap_q->map_size, DMA_FROM_DEVICE);
400
401                 unmap->page = page;
402                 unmap->page_offset = page_offset;
403                 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
404                 unmap->vector.len = unmap_q->map_size;
405                 page_offset += unmap_q->map_size;
406
407                 if (page_offset < alloc_size)
408                         unmap_q->reuse_pi = prod;
409                 else
410                         unmap_q->reuse_pi = -1;
411
412                 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
413                 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
414                 BNA_QE_INDX_INC(prod, q_depth);
415                 alloced++;
416         }
417
418 finishing:
419         if (likely(alloced)) {
420                 rcb->producer_index = prod;
421                 smp_mb();
422                 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
423                         bna_rxq_prod_indx_doorbell(rcb);
424         }
425
426         return alloced;
427 }
428
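/*
 * Refills the RxQ with skb-backed buffers: allocate an skb per entry,
 * DMA-map its data area and post the descriptor.  Stops early on
 * allocation failure and rings the doorbell for whatever was posted.
 */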
429 static u32
430 bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
431 {
432         u32 alloced, prod, q_depth, buff_sz;
433         struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
434         struct bnad_rx_unmap *unmap;
435         struct bna_rxq_entry *rxent;
436         struct sk_buff *skb;
437         dma_addr_t dma_addr;
438
439         buff_sz = rcb->rxq->buffer_size;
440         prod = rcb->producer_index;
441         q_depth = rcb->q_depth;
442
443         alloced = 0;
444         while (nalloc--) {
445                 unmap = &unmap_q->unmap[prod];
446
447                 skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);
448
449                 if (unlikely(!skb)) {
450                         BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
451                         rcb->rxq->rxbuf_alloc_failed++;
452                         goto finishing;
453                 }
454                 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
455                                           buff_sz, DMA_FROM_DEVICE);
456
457                 unmap->skb = skb;
458                 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
459                 unmap->vector.len = buff_sz;
460
461                 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
462                 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
463                 BNA_QE_INDX_INC(prod, q_depth);
464                 alloced++;
465         }
466
467 finishing:
468         if (likely(alloced)) {
469                 rcb->producer_index = prod;
470                 smp_mb();
471                 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
472                         bna_rxq_prod_indx_doorbell(rcb);
473         }
474
475         return alloced;
476 }
477
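/*
 * Tops up the RxQ once the free-entry count crosses the refill
 * threshold, using the skb or page refill path according to how the
 * unmap queue was initialized.
 */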
478 static inline void
479 bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
480 {
481         struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
482         u32 to_alloc;
483
484         to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth);
485         if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
486                 return;
487
488         if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
489                 bnad_rxq_refill_skb(bnad, rcb, to_alloc);
490         else
491                 bnad_rxq_refill_page(bnad, rcb, to_alloc);
492 }
493
494 #define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
495                                         BNA_CQ_EF_IPV6 | \
496                                         BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
497                                         BNA_CQ_EF_L4_CKSUM_OK)
498
499 #define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
500                                 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
501 #define flags_tcp6 (BNA_CQ_EF_IPV6 | \
502                                 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
503 #define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
504                                 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
505 #define flags_udp6 (BNA_CQ_EF_IPV6 | \
506                                 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
507
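/* Drops an errored frame: releases the nvecs Rx buffers starting at sop_ci */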
508 static void
509 bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
510                     u32 sop_ci, u32 nvecs)
511 {
512         struct bnad_rx_unmap_q *unmap_q;
513         struct bnad_rx_unmap *unmap;
514         u32 ci, vec;
515
516         unmap_q = rcb->unmap_q;
517         for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
518                 unmap = &unmap_q->unmap[ci];
519                 BNA_QE_INDX_INC(ci, rcb->q_depth);
520
521                 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
522                         bnad_rxq_cleanup_skb(bnad, unmap);
523                 else
524                         bnad_rxq_cleanup_page(bnad, unmap);
525         }
526 }
527
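/*
 * Attaches the received page buffers (nvecs of them, starting at
 * sop_ci) to the GRO skb as page fragments; the last fragment uses
 * last_fraglen, the others their full mapped length.
 */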
528 static void
529 bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb,
530                         u32 sop_ci, u32 nvecs, u32 last_fraglen)
531 {
532         struct bnad *bnad;
533         u32 ci, vec, len, totlen = 0;
534         struct bnad_rx_unmap_q *unmap_q;
535         struct bnad_rx_unmap *unmap;
536
537         unmap_q = rcb->unmap_q;
538         bnad = rcb->bnad;
539
540         /* prefetch header */
541         prefetch(page_address(unmap_q->unmap[sop_ci].page) +
542                         unmap_q->unmap[sop_ci].page_offset);
543
544         for (vec = 1, ci = sop_ci; vec <= nvecs; vec++) {
545                 unmap = &unmap_q->unmap[ci];
546                 BNA_QE_INDX_INC(ci, rcb->q_depth);
547
548                 dma_unmap_page(&bnad->pcidev->dev,
549                                 dma_unmap_addr(&unmap->vector, dma_addr),
550                                 unmap->vector.len, DMA_FROM_DEVICE);
551
552                 len = (vec == nvecs) ?
553                         last_fraglen : unmap->vector.len;
554                 totlen += len;
555
556                 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
557                                 unmap->page, unmap->page_offset, len);
558
559                 unmap->page = NULL;
560                 unmap->vector.len = 0;
561         }
562
563         skb->len += totlen;
564         skb->data_len += totlen;
565         skb->truesize += totlen;
566 }
567
568 static inline void
569 bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
570                   struct bnad_rx_unmap *unmap, u32 len)
571 {
572         prefetch(skb->data);
573
574         dma_unmap_single(&bnad->pcidev->dev,
575                         dma_unmap_addr(&unmap->vector, dma_addr),
576                         unmap->vector.len, DMA_FROM_DEVICE);
577
578         skb_put(skb, len);
579         skb->protocol = eth_type_trans(skb, bnad->netdev);
580
581         unmap->skb = NULL;
582         unmap->vector.len = 0;
583 }
584
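/*
 * Processes up to "budget" Rx completions on one CQ: gathers the
 * completions of multi-buffer frames, drops errored packets, applies
 * checksum and VLAN offload results, hands frames to the stack, then
 * acks the interrupt block and replenishes the RxQ(s).
 */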
585 static u32
586 bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
587 {
588         struct bna_cq_entry *cq, *cmpl, *next_cmpl;
589         struct bna_rcb *rcb = NULL;
590         struct bnad_rx_unmap_q *unmap_q;
591         struct bnad_rx_unmap *unmap = NULL;
592         struct sk_buff *skb = NULL;
593         struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
594         struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
595         u32 packets = 0, len = 0, totlen = 0;
596         u32 pi, vec, sop_ci = 0, nvecs = 0;
597         u32 flags, masked_flags;
598
599         prefetch(bnad->netdev);
600
601         cq = ccb->sw_q;
602         cmpl = &cq[ccb->producer_index];
603
604         while (cmpl->valid && (packets < budget)) {
605                 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
606
607                 if (bna_is_small_rxq(cmpl->rxq_id))
608                         rcb = ccb->rcb[1];
609                 else
610                         rcb = ccb->rcb[0];
611
612                 unmap_q = rcb->unmap_q;
613
614                 /* start of packet ci */
615                 sop_ci = rcb->consumer_index;
616
617                 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
618                         unmap = &unmap_q->unmap[sop_ci];
619                         skb = unmap->skb;
620                 } else {
621                         skb = napi_get_frags(&rx_ctrl->napi);
622                         if (unlikely(!skb))
623                                 break;
624                 }
625                 prefetch(skb);
626
627                 flags = ntohl(cmpl->flags);
628                 len = ntohs(cmpl->length);
629                 totlen = len;
630                 nvecs = 1;
631
632                 /* Gather all completions for this frame; busy-waiting
633                  * for a late completion doesn't help much, so break out
634                  * instead and retry on the next poll.
635                  */
635                 if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
636                     (flags & BNA_CQ_EF_EOP) == 0) {
637                         pi = ccb->producer_index;
638                         do {
639                                 BNA_QE_INDX_INC(pi, ccb->q_depth);
640                                 next_cmpl = &cq[pi];
641
642                                 if (!next_cmpl->valid)
643                                         break;
644
645                                 len = ntohs(next_cmpl->length);
646                                 flags = ntohl(next_cmpl->flags);
647
648                                 nvecs++;
649                                 totlen += len;
650                         } while ((flags & BNA_CQ_EF_EOP) == 0);
651
652                         if (!next_cmpl->valid)
653                                 break;
654                 }
655
656                 /* TODO: BNA_CQ_EF_LOCAL ? */
657                 if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
658                                                 BNA_CQ_EF_FCS_ERROR |
659                                                 BNA_CQ_EF_TOO_LONG))) {
660                         bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
661                         rcb->rxq->rx_packets_with_error++;
662
663                         goto next;
664                 }
665
666                 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
667                         bnad_cq_setup_skb(bnad, skb, unmap, len);
668                 else
669                         bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
670
671                 packets++;
672                 rcb->rxq->rx_packets++;
673                 rcb->rxq->rx_bytes += totlen;
674                 ccb->bytes_per_intr += totlen;
675
676                 masked_flags = flags & flags_cksum_prot_mask;
677
678                 if (likely
679                     ((bnad->netdev->features & NETIF_F_RXCSUM) &&
680                      ((masked_flags == flags_tcp4) ||
681                       (masked_flags == flags_udp4) ||
682                       (masked_flags == flags_tcp6) ||
683                       (masked_flags == flags_udp6))))
684                         skb->ip_summed = CHECKSUM_UNNECESSARY;
685                 else
686                         skb_checksum_none_assert(skb);
687
688                 if (flags & BNA_CQ_EF_VLAN)
689                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
690
691                 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
692                         netif_receive_skb(skb);
693                 else
694                         napi_gro_frags(&rx_ctrl->napi);
695
696 next:
697                 BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
698                 for (vec = 0; vec < nvecs; vec++) {
699                         cmpl = &cq[ccb->producer_index];
700                         cmpl->valid = 0;
701                         BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
702                 }
703                 cmpl = &cq[ccb->producer_index];
704         }
705
706         napi_gro_flush(&rx_ctrl->napi, false);
707         if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
708                 bna_ib_ack_disable_irq(ccb->i_dbell, packets);
709
710         bnad_rxq_post(bnad, ccb->rcb[0]);
711         if (ccb->rcb[1])
712                 bnad_rxq_post(bnad, ccb->rcb[1]);
713
714         return packets;
715 }
716
717 static void
718 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
719 {
720         struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
721         struct napi_struct *napi = &rx_ctrl->napi;
722
723         if (likely(napi_schedule_prep(napi))) {
724                 __napi_schedule(napi);
725                 rx_ctrl->rx_schedule++;
726         }
727 }
728
729 /* MSIX Rx Path Handler */
730 static irqreturn_t
731 bnad_msix_rx(int irq, void *data)
732 {
733         struct bna_ccb *ccb = (struct bna_ccb *)data;
734
735         if (ccb) {
736                 ((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
737                 bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
738         }
739
740         return IRQ_HANDLED;
741 }
742
743 /* Interrupt handlers */
744
745 /* Mbox Interrupt Handlers */
746 static irqreturn_t
747 bnad_msix_mbox_handler(int irq, void *data)
748 {
749         u32 intr_status;
750         unsigned long flags;
751         struct bnad *bnad = (struct bnad *)data;
752
753         spin_lock_irqsave(&bnad->bna_lock, flags);
754         if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
755                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
756                 return IRQ_HANDLED;
757         }
758
759         bna_intr_status_get(&bnad->bna, intr_status);
760
761         if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
762                 bna_mbox_handler(&bnad->bna, intr_status);
763
764         spin_unlock_irqrestore(&bnad->bna_lock, flags);
765
766         return IRQ_HANDLED;
767 }
768
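/*
 * INTx (shared/legacy) interrupt handler: services the mailbox first
 * and, for data interrupts, reclaims Tx completions and schedules NAPI
 * polling on every Rx path.
 */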
769 static irqreturn_t
770 bnad_isr(int irq, void *data)
771 {
772         int i, j;
773         u32 intr_status;
774         unsigned long flags;
775         struct bnad *bnad = (struct bnad *)data;
776         struct bnad_rx_info *rx_info;
777         struct bnad_rx_ctrl *rx_ctrl;
778         struct bna_tcb *tcb = NULL;
779
780         spin_lock_irqsave(&bnad->bna_lock, flags);
781         if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
782                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
783                 return IRQ_NONE;
784         }
785
786         bna_intr_status_get(&bnad->bna, intr_status);
787
788         if (unlikely(!intr_status)) {
789                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
790                 return IRQ_NONE;
791         }
792
793         if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
794                 bna_mbox_handler(&bnad->bna, intr_status);
795
796         spin_unlock_irqrestore(&bnad->bna_lock, flags);
797
798         if (!BNA_IS_INTX_DATA_INTR(intr_status))
799                 return IRQ_HANDLED;
800
801         /* Process data interrupts */
802         /* Tx processing */
803         for (i = 0; i < bnad->num_tx; i++) {
804                 for (j = 0; j < bnad->num_txq_per_tx; j++) {
805                         tcb = bnad->tx_info[i].tcb[j];
806                         if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
807                                 bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
808                 }
809         }
810         /* Rx processing */
811         for (i = 0; i < bnad->num_rx; i++) {
812                 rx_info = &bnad->rx_info[i];
813                 if (!rx_info->rx)
814                         continue;
815                 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
816                         rx_ctrl = &rx_info->rx_ctrl[j];
817                         if (rx_ctrl->ccb)
818                                 bnad_netif_rx_schedule_poll(bnad,
819                                                             rx_ctrl->ccb);
820                 }
821         }
822         return IRQ_HANDLED;
823 }
824
825 /*
826  * Called in interrupt / callback context
827  * with bna_lock held, so cfg_flags access is OK
828  */
829 static void
830 bnad_enable_mbox_irq(struct bnad *bnad)
831 {
832         clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
833
834         BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
835 }
836
837 /*
838  * Called with bnad->bna_lock held because of
839  * bnad->cfg_flags access.
840  */
841 static void
842 bnad_disable_mbox_irq(struct bnad *bnad)
843 {
844         set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
845
846         BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
847 }
848
849 static void
850 bnad_set_netdev_perm_addr(struct bnad *bnad)
851 {
852         struct net_device *netdev = bnad->netdev;
853
854         memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
855         if (is_zero_ether_addr(netdev->dev_addr))
856                 memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
857 }
858
859 /* Control Path Handlers */
860
861 /* Callbacks */
862 void
863 bnad_cb_mbox_intr_enable(struct bnad *bnad)
864 {
865         bnad_enable_mbox_irq(bnad);
866 }
867
868 void
869 bnad_cb_mbox_intr_disable(struct bnad *bnad)
870 {
871         bnad_disable_mbox_irq(bnad);
872 }
873
874 void
875 bnad_cb_ioceth_ready(struct bnad *bnad)
876 {
877         bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
878         complete(&bnad->bnad_completions.ioc_comp);
879 }
880
881 void
882 bnad_cb_ioceth_failed(struct bnad *bnad)
883 {
884         bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
885         complete(&bnad->bnad_completions.ioc_comp);
886 }
887
888 void
889 bnad_cb_ioceth_disabled(struct bnad *bnad)
890 {
891         bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
892         complete(&bnad->bnad_completions.ioc_comp);
893 }
894
895 static void
896 bnad_cb_enet_disabled(void *arg)
897 {
898         struct bnad *bnad = (struct bnad *)arg;
899
900         netif_carrier_off(bnad->netdev);
901         complete(&bnad->bnad_completions.enet_comp);
902 }
903
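/*
 * Link-state callback from bna: tracks CEE state, toggles the netdev
 * carrier and, on link up, wakes or stops each Tx subqueue according
 * to its BNAD_TXQ_TX_STARTED flag.
 */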
904 void
905 bnad_cb_ethport_link_status(struct bnad *bnad,
906                         enum bna_link_status link_status)
907 {
908         bool link_up = false;
909
910         link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
911
912         if (link_status == BNA_CEE_UP) {
913                 if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
914                         BNAD_UPDATE_CTR(bnad, cee_toggle);
915                 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
916         } else {
917                 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
918                         BNAD_UPDATE_CTR(bnad, cee_toggle);
919                 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
920         }
921
922         if (link_up) {
923                 if (!netif_carrier_ok(bnad->netdev)) {
924                         uint tx_id, tcb_id;
925                         printk(KERN_WARNING "bna: %s link up\n",
926                                 bnad->netdev->name);
927                         netif_carrier_on(bnad->netdev);
928                         BNAD_UPDATE_CTR(bnad, link_toggle);
929                         for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
930                                 for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
931                                       tcb_id++) {
932                                         struct bna_tcb *tcb =
933                                         bnad->tx_info[tx_id].tcb[tcb_id];
934                                         u32 txq_id;
935                                         if (!tcb)
936                                                 continue;
937
938                                         txq_id = tcb->id;
939
940                                         if (test_bit(BNAD_TXQ_TX_STARTED,
941                                                      &tcb->flags)) {
942                                                 /* Force an immediate
943                                                  * Transmit Schedule
944                                                  */
945                                                 printk(KERN_INFO "bna: %s %d "
946                                                       "TXQ_STARTED\n",
947                                                        bnad->netdev->name,
948                                                        txq_id);
949                                                 netif_wake_subqueue(
950                                                                 bnad->netdev,
951                                                                 txq_id);
952                                                 BNAD_UPDATE_CTR(bnad,
953                                                         netif_queue_wakeup);
954                                         } else {
955                                                 netif_stop_subqueue(
956                                                                 bnad->netdev,
957                                                                 txq_id);
958                                                 BNAD_UPDATE_CTR(bnad,
959                                                         netif_queue_stop);
960                                         }
961                                 }
962                         }
963                 }
964         } else {
965                 if (netif_carrier_ok(bnad->netdev)) {
966                         printk(KERN_WARNING "bna: %s link down\n",
967                                 bnad->netdev->name);
968                         netif_carrier_off(bnad->netdev);
969                         BNAD_UPDATE_CTR(bnad, link_toggle);
970                 }
971         }
972 }
973
974 static void
975 bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
976 {
977         struct bnad *bnad = (struct bnad *)arg;
978
979         complete(&bnad->bnad_completions.tx_comp);
980 }
981
982 static void
983 bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
984 {
985         struct bnad_tx_info *tx_info =
986                         (struct bnad_tx_info *)tcb->txq->tx->priv;
987
988         tcb->priv = tcb;
989         tx_info->tcb[tcb->id] = tcb;
990 }
991
992 static void
993 bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
994 {
995         struct bnad_tx_info *tx_info =
996                         (struct bnad_tx_info *)tcb->txq->tx->priv;
997
998         tx_info->tcb[tcb->id] = NULL;
999         tcb->priv = NULL;
1000 }
1001
1002 static void
1003 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
1004 {
1005         struct bnad_rx_info *rx_info =
1006                         (struct bnad_rx_info *)ccb->cq->rx->priv;
1007
1008         rx_info->rx_ctrl[ccb->id].ccb = ccb;
1009         ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
1010 }
1011
1012 static void
1013 bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
1014 {
1015         struct bnad_rx_info *rx_info =
1016                         (struct bnad_rx_info *)ccb->cq->rx->priv;
1017
1018         rx_info->rx_ctrl[ccb->id].ccb = NULL;
1019 }
1020
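/*
 * Tx stall callback from bna: clears BNAD_TXQ_TX_STARTED on every TxQ
 * of this Tx object and stops the corresponding netdev subqueues.
 */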
1021 static void
1022 bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
1023 {
1024         struct bnad_tx_info *tx_info =
1025                         (struct bnad_tx_info *)tx->priv;
1026         struct bna_tcb *tcb;
1027         u32 txq_id;
1028         int i;
1029
1030         for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1031                 tcb = tx_info->tcb[i];
1032                 if (!tcb)
1033                         continue;
1034                 txq_id = tcb->id;
1035                 clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1036                 netif_stop_subqueue(bnad->netdev, txq_id);
1037                 printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
1038                         bnad->netdev->name, txq_id);
1039         }
1040 }
1041
1042 static void
1043 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
1044 {
1045         struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1046         struct bna_tcb *tcb;
1047         u32 txq_id;
1048         int i;
1049
1050         for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1051                 tcb = tx_info->tcb[i];
1052                 if (!tcb)
1053                         continue;
1054                 txq_id = tcb->id;
1055
1056                 BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
1057                 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1058                 BUG_ON(*(tcb->hw_consumer_index) != 0);
1059
1060                 if (netif_carrier_ok(bnad->netdev)) {
1061                         printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
1062                                 bnad->netdev->name, txq_id);
1063                         netif_wake_subqueue(bnad->netdev, txq_id);
1064                         BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
1065                 }
1066         }
1067
1068         /*
1069          * Workaround: the first ioceth enable can fail and leave us
1070          * with a zero MAC address, so try to fetch the MAC address
1071          * again here.
1072          */
1073         if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
1074                 bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
1075                 bnad_set_netdev_perm_addr(bnad);
1076         }
1077 }
1078
1079 /*
1080  * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
1081  */
1082 static void
1083 bnad_tx_cleanup(struct delayed_work *work)
1084 {
1085         struct bnad_tx_info *tx_info =
1086                 container_of(work, struct bnad_tx_info, tx_cleanup_work);
1087         struct bnad *bnad = NULL;
1088         struct bna_tcb *tcb;
1089         unsigned long flags;
1090         u32 i, pending = 0;
1091
1092         for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1093                 tcb = tx_info->tcb[i];
1094                 if (!tcb)
1095                         continue;
1096
1097                 bnad = tcb->bnad;
1098
1099                 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
1100                         pending++;
1101                         continue;
1102                 }
1103
1104                 bnad_txq_cleanup(bnad, tcb);
1105
1106                 smp_mb__before_clear_bit();
1107                 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
1108         }
1109
1110         if (pending) {
1111                 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
1112                         msecs_to_jiffies(1));
1113                 return;
1114         }
1115
1116         spin_lock_irqsave(&bnad->bna_lock, flags);
1117         bna_tx_cleanup_complete(tx_info->tx);
1118         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1119 }
1120
1121 static void
1122 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
1123 {
1124         struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1125         struct bna_tcb *tcb;
1126         int i;
1127
1128         for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1129                 tcb = tx_info->tcb[i];
1130                 if (!tcb)
1131                         continue;
1132         }
1133
1134         queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
1135 }
1136
1137 static void
1138 bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
1139 {
1140         struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1141         struct bna_ccb *ccb;
1142         struct bnad_rx_ctrl *rx_ctrl;
1143         int i;
1144
1145         for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1146                 rx_ctrl = &rx_info->rx_ctrl[i];
1147                 ccb = rx_ctrl->ccb;
1148                 if (!ccb)
1149                         continue;
1150
1151                 clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);
1152
1153                 if (ccb->rcb[1])
1154                         clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
1155         }
1156 }
1157
1158 /*
1159  * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
1160  */
1161 static void
1162 bnad_rx_cleanup(void *work)
1163 {
1164         struct bnad_rx_info *rx_info =
1165                 container_of(work, struct bnad_rx_info, rx_cleanup_work);
1166         struct bnad_rx_ctrl *rx_ctrl;
1167         struct bnad *bnad = NULL;
1168         unsigned long flags;
1169         u32 i;
1170
1171         for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1172                 rx_ctrl = &rx_info->rx_ctrl[i];
1173
1174                 if (!rx_ctrl->ccb)
1175                         continue;
1176
1177                 bnad = rx_ctrl->ccb->bnad;
1178
1179                 /*
1180                  * Wait till the poll handler has exited
1181                  * and nothing can be scheduled anymore
1182                  */
1183                 napi_disable(&rx_ctrl->napi);
1184
1185                 bnad_cq_cleanup(bnad, rx_ctrl->ccb);
1186                 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
1187                 if (rx_ctrl->ccb->rcb[1])
1188                         bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
1189         }
1190
1191         spin_lock_irqsave(&bnad->bna_lock, flags);
1192         bna_rx_cleanup_complete(rx_info->rx);
1193         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1194 }
1195
1196 static void
1197 bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
1198 {
1199         struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1200         struct bna_ccb *ccb;
1201         struct bnad_rx_ctrl *rx_ctrl;
1202         int i;
1203
1204         for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1205                 rx_ctrl = &rx_info->rx_ctrl[i];
1206                 ccb = rx_ctrl->ccb;
1207                 if (!ccb)
1208                         continue;
1209
1210                 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
1211
1212                 if (ccb->rcb[1])
1213                         clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
1214         }
1215
1216         queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
1217 }
1218
1219 static void
1220 bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1221 {
1222         struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1223         struct bna_ccb *ccb;
1224         struct bna_rcb *rcb;
1225         struct bnad_rx_ctrl *rx_ctrl;
1226         int i, j;
1227
1228         for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1229                 rx_ctrl = &rx_info->rx_ctrl[i];
1230                 ccb = rx_ctrl->ccb;
1231                 if (!ccb)
1232                         continue;
1233
1234                 napi_enable(&rx_ctrl->napi);
1235
1236                 for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
1237                         rcb = ccb->rcb[j];
1238                         if (!rcb)
1239                                 continue;
1240
1241                         bnad_rxq_alloc_init(bnad, rcb);
1242                         set_bit(BNAD_RXQ_STARTED, &rcb->flags);
1243                         set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
1244                         bnad_rxq_post(bnad, rcb);
1245                 }
1246         }
1247 }
1248
1249 static void
1250 bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
1251 {
1252         struct bnad *bnad = (struct bnad *)arg;
1253
1254         complete(&bnad->bnad_completions.rx_comp);
1255 }
1256
1257 static void
1258 bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
1259 {
1260         bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
1261         complete(&bnad->bnad_completions.mcast_comp);
1262 }
1263
1264 void
1265 bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
1266                        struct bna_stats *stats)
1267 {
1268         if (status == BNA_CB_SUCCESS)
1269                 BNAD_UPDATE_CTR(bnad, hw_stats_updates);
1270
1271         if (!netif_running(bnad->netdev) ||
1272                 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1273                 return;
1274
1275         mod_timer(&bnad->stats_timer,
1276                   jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1277 }
1278
1279 static void
1280 bnad_cb_enet_mtu_set(struct bnad *bnad)
1281 {
1282         bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
1283         complete(&bnad->bnad_completions.mtu_comp);
1284 }
1285
1286 void
1287 bnad_cb_completion(void *arg, enum bfa_status status)
1288 {
1289         struct bnad_iocmd_comp *iocmd_comp =
1290                         (struct bnad_iocmd_comp *)arg;
1291
1292         iocmd_comp->comp_status = (u32) status;
1293         complete(&iocmd_comp->comp);
1294 }
1295
1296 /* Resource allocation, free functions */
1297
1298 static void
1299 bnad_mem_free(struct bnad *bnad,
1300               struct bna_mem_info *mem_info)
1301 {
1302         int i;
1303         dma_addr_t dma_pa;
1304
1305         if (mem_info->mdl == NULL)
1306                 return;
1307
1308         for (i = 0; i < mem_info->num; i++) {
1309                 if (mem_info->mdl[i].kva != NULL) {
1310                         if (mem_info->mem_type == BNA_MEM_T_DMA) {
1311                                 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1312                                                 dma_pa);
1313                                 dma_free_coherent(&bnad->pcidev->dev,
1314                                                   mem_info->mdl[i].len,
1315                                                   mem_info->mdl[i].kva, dma_pa);
1316                         } else
1317                                 kfree(mem_info->mdl[i].kva);
1318                 }
1319         }
1320         kfree(mem_info->mdl);
1321         mem_info->mdl = NULL;
1322 }
1323
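/*
 * Allocates mem_info->num blocks of mem_info->len bytes each, either
 * DMA-coherent or plain kzalloc'ed memory depending on mem_type, and
 * records them in the descriptor list mem_info->mdl.
 */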
1324 static int
1325 bnad_mem_alloc(struct bnad *bnad,
1326                struct bna_mem_info *mem_info)
1327 {
1328         int i;
1329         dma_addr_t dma_pa;
1330
1331         if ((mem_info->num == 0) || (mem_info->len == 0)) {
1332                 mem_info->mdl = NULL;
1333                 return 0;
1334         }
1335
1336         mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1337                                 GFP_KERNEL);
1338         if (mem_info->mdl == NULL)
1339                 return -ENOMEM;
1340
1341         if (mem_info->mem_type == BNA_MEM_T_DMA) {
1342                 for (i = 0; i < mem_info->num; i++) {
1343                         mem_info->mdl[i].len = mem_info->len;
1344                         mem_info->mdl[i].kva =
1345                                 dma_alloc_coherent(&bnad->pcidev->dev,
1346                                                    mem_info->len, &dma_pa,
1347                                                    GFP_KERNEL);
1348                         if (mem_info->mdl[i].kva == NULL)
1349                                 goto err_return;
1350
1351                         BNA_SET_DMA_ADDR(dma_pa,
1352                                          &(mem_info->mdl[i].dma));
1353                 }
1354         } else {
1355                 for (i = 0; i < mem_info->num; i++) {
1356                         mem_info->mdl[i].len = mem_info->len;
1357                         mem_info->mdl[i].kva = kzalloc(mem_info->len,
1358                                                         GFP_KERNEL);
1359                         if (mem_info->mdl[i].kva == NULL)
1360                                 goto err_return;
1361                 }
1362         }
1363
1364         return 0;
1365
1366 err_return:
1367         bnad_mem_free(bnad, mem_info);
1368         return -ENOMEM;
1369 }
1370
1371 /* Free IRQ for Mailbox */
1372 static void
1373 bnad_mbox_irq_free(struct bnad *bnad)
1374 {
1375         int irq;
1376         unsigned long flags;
1377
1378         spin_lock_irqsave(&bnad->bna_lock, flags);
1379         bnad_disable_mbox_irq(bnad);
1380         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1381
1382         irq = BNAD_GET_MBOX_IRQ(bnad);
1383         free_irq(irq, bnad);
1384 }
1385
1386 /*
1387  * Allocates the IRQ for the Mailbox, but keeps it disabled.
1388  * It will be enabled once we get the mbox enable callback
1389  * from bna.
1390  */
1391 static int
1392 bnad_mbox_irq_alloc(struct bnad *bnad)
1393 {
1394         int             err = 0;
1395         unsigned long   irq_flags, flags;
1396         u32     irq;
1397         irq_handler_t   irq_handler;
1398
1399         spin_lock_irqsave(&bnad->bna_lock, flags);
1400         if (bnad->cfg_flags & BNAD_CF_MSIX) {
1401                 irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1402                 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
1403                 irq_flags = 0;
1404         } else {
1405                 irq_handler = (irq_handler_t)bnad_isr;
1406                 irq = bnad->pcidev->irq;
1407                 irq_flags = IRQF_SHARED;
1408         }
1409
1410         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1411         sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1412
1413         /*
1414          * Set the Mbox IRQ disable flag, so that the IRQ handler
1415          * called from request_irq() for SHARED IRQs does not execute
1416          */
1417         set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1418
1419         BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1420
1421         err = request_irq(irq, irq_handler, irq_flags,
1422                           bnad->mbox_irq_name, bnad);
1423
1424         return err;
1425 }
1426
1427 static void
1428 bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1429 {
1430         kfree(intr_info->idl);
1431         intr_info->idl = NULL;
1432 }
1433
1434 /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
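/*
 * In MSI-X mode the vector layout is: mailbox vector(s) first, then one
 * vector per TxQ, then one per RxP; vector_start below is this object's
 * offset within that layout.  In INTx mode a single shared vector is
 * described by an IB bitmask instead.
 */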
1435 static int
1436 bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1437                     u32 txrx_id, struct bna_intr_info *intr_info)
1438 {
1439         int i, vector_start = 0;
1440         u32 cfg_flags;
1441         unsigned long flags;
1442
1443         spin_lock_irqsave(&bnad->bna_lock, flags);
1444         cfg_flags = bnad->cfg_flags;
1445         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1446
1447         if (cfg_flags & BNAD_CF_MSIX) {
1448                 intr_info->intr_type = BNA_INTR_T_MSIX;
1449                 intr_info->idl = kcalloc(intr_info->num,
1450                                         sizeof(struct bna_intr_descr),
1451                                         GFP_KERNEL);
1452                 if (!intr_info->idl)
1453                         return -ENOMEM;
1454
1455                 switch (src) {
1456                 case BNAD_INTR_TX:
1457                         vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
1458                         break;
1459
1460                 case BNAD_INTR_RX:
1461                         vector_start = BNAD_MAILBOX_MSIX_VECTORS +
1462                                         (bnad->num_tx * bnad->num_txq_per_tx) +
1463                                         txrx_id;
1464                         break;
1465
1466                 default:
1467                         BUG();
1468                 }
1469
1470                 for (i = 0; i < intr_info->num; i++)
1471                         intr_info->idl[i].vector = vector_start + i;
1472         } else {
1473                 intr_info->intr_type = BNA_INTR_T_INTX;
1474                 intr_info->num = 1;
1475                 intr_info->idl = kcalloc(intr_info->num,
1476                                         sizeof(struct bna_intr_descr),
1477                                         GFP_KERNEL);
1478                 if (!intr_info->idl)
1479                         return -ENOMEM;
1480
1481                 switch (src) {
1482                 case BNAD_INTR_TX:
1483                         intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
1484                         break;
1485
1486                 case BNAD_INTR_RX:
1487                         intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
1488                         break;
1489                 }
1490         }
1491         return 0;
1492 }
1493
1494 /* NOTE: Should be called for MSIX only
1495  * Unregisters Tx MSIX vector(s) from the kernel
1496  */
1497 static void
1498 bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1499                         int num_txqs)
1500 {
1501         int i;
1502         int vector_num;
1503
1504         for (i = 0; i < num_txqs; i++) {
1505                 if (tx_info->tcb[i] == NULL)
1506                         continue;
1507
1508                 vector_num = tx_info->tcb[i]->intr_vector;
1509                 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1510         }
1511 }
1512
1513 /* NOTE: Should be called for MSIX only
1514  * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1515  */
1516 static int
1517 bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1518                         u32 tx_id, int num_txqs)
1519 {
1520         int i;
1521         int err;
1522         int vector_num;
1523
1524         for (i = 0; i < num_txqs; i++) {
1525                 vector_num = tx_info->tcb[i]->intr_vector;
1526                 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1527                                 tx_id + tx_info->tcb[i]->id);
1528                 err = request_irq(bnad->msix_table[vector_num].vector,
1529                                   (irq_handler_t)bnad_msix_tx, 0,
1530                                   tx_info->tcb[i]->name,
1531                                   tx_info->tcb[i]);
1532                 if (err)
1533                         goto err_return;
1534         }
1535
1536         return 0;
1537
1538 err_return:
1539         if (i > 0)
1540                 bnad_tx_msix_unregister(bnad, tx_info, i);
1541         return -1;
1542 }
1543
1544 /* NOTE: Should be called for MSIX only
1545  * Unregisters Rx MSIX vector(s) from the kernel
1546  */
1547 static void
1548 bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1549                         int num_rxps)
1550 {
1551         int i;
1552         int vector_num;
1553
1554         for (i = 0; i < num_rxps; i++) {
1555                 if (rx_info->rx_ctrl[i].ccb == NULL)
1556                         continue;
1557
1558                 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1559                 free_irq(bnad->msix_table[vector_num].vector,
1560                          rx_info->rx_ctrl[i].ccb);
1561         }
1562 }
1563
1564 /* NOTE: Should be called for MSIX only
1565  * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
1566  */
1567 static int
1568 bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1569                         u32 rx_id, int num_rxps)
1570 {
1571         int i;
1572         int err;
1573         int vector_num;
1574
1575         for (i = 0; i < num_rxps; i++) {
1576                 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1577                 sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1578                         bnad->netdev->name,
1579                         rx_id + rx_info->rx_ctrl[i].ccb->id);
1580                 err = request_irq(bnad->msix_table[vector_num].vector,
1581                                   (irq_handler_t)bnad_msix_rx, 0,
1582                                   rx_info->rx_ctrl[i].ccb->name,
1583                                   rx_info->rx_ctrl[i].ccb);
1584                 if (err)
1585                         goto err_return;
1586         }
1587
1588         return 0;
1589
1590 err_return:
1591         if (i > 0)
1592                 bnad_rx_msix_unregister(bnad, rx_info, i);
1593         return -1;
1594 }
1595
1596 /* Free Tx object Resources */
1597 static void
1598 bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1599 {
1600         int i;
1601
1602         for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1603                 if (res_info[i].res_type == BNA_RES_T_MEM)
1604                         bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1605                 else if (res_info[i].res_type == BNA_RES_T_INTR)
1606                         bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1607         }
1608 }
1609
1610 /* Allocates memory and interrupt resources for Tx object */
1611 static int
1612 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1613                   u32 tx_id)
1614 {
1615         int i, err = 0;
1616
1617         for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1618                 if (res_info[i].res_type == BNA_RES_T_MEM)
1619                         err = bnad_mem_alloc(bnad,
1620                                         &res_info[i].res_u.mem_info);
1621                 else if (res_info[i].res_type == BNA_RES_T_INTR)
1622                         err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1623                                         &res_info[i].res_u.intr_info);
1624                 if (err)
1625                         goto err_return;
1626         }
1627         return 0;
1628
1629 err_return:
1630         bnad_tx_res_free(bnad, res_info);
1631         return err;
1632 }
1633
1634 /* Free Rx object Resources */
1635 static void
1636 bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1637 {
1638         int i;
1639
1640         for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1641                 if (res_info[i].res_type == BNA_RES_T_MEM)
1642                         bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1643                 else if (res_info[i].res_type == BNA_RES_T_INTR)
1644                         bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1645         }
1646 }
1647
1648 /* Allocates memory and interrupt resources for Rx object */
1649 static int
1650 bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1651                   uint rx_id)
1652 {
1653         int i, err = 0;
1654
1655         /* All memory needs to be allocated before setup_ccbs */
1656         for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1657                 if (res_info[i].res_type == BNA_RES_T_MEM)
1658                         err = bnad_mem_alloc(bnad,
1659                                         &res_info[i].res_u.mem_info);
1660                 else if (res_info[i].res_type == BNA_RES_T_INTR)
1661                         err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1662                                         &res_info[i].res_u.intr_info);
1663                 if (err)
1664                         goto err_return;
1665         }
1666         return 0;
1667
1668 err_return:
1669         bnad_rx_res_free(bnad, res_info);
1670         return err;
1671 }
1672
1673 /* Timer callbacks */
1674 /* a) IOC timer */
1675 static void
1676 bnad_ioc_timeout(unsigned long data)
1677 {
1678         struct bnad *bnad = (struct bnad *)data;
1679         unsigned long flags;
1680
1681         spin_lock_irqsave(&bnad->bna_lock, flags);
1682         bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
1683         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1684 }
1685
1686 static void
1687 bnad_ioc_hb_check(unsigned long data)
1688 {
1689         struct bnad *bnad = (struct bnad *)data;
1690         unsigned long flags;
1691
1692         spin_lock_irqsave(&bnad->bna_lock, flags);
1693         bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
1694         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1695 }
1696
1697 static void
1698 bnad_iocpf_timeout(unsigned long data)
1699 {
1700         struct bnad *bnad = (struct bnad *)data;
1701         unsigned long flags;
1702
1703         spin_lock_irqsave(&bnad->bna_lock, flags);
1704         bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
1705         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1706 }
1707
1708 static void
1709 bnad_iocpf_sem_timeout(unsigned long data)
1710 {
1711         struct bnad *bnad = (struct bnad *)data;
1712         unsigned long flags;
1713
1714         spin_lock_irqsave(&bnad->bna_lock, flags);
1715         bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
1716         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1717 }
1718
1719 /*
1720  * All timer routines use bnad->bna_lock to protect against
1721  * the following race, which may occur in case of no locking:
1722  *      Time    CPU m   CPU n
1723  *      0       1 = test_bit
1724  *      1                       clear_bit
1725  *      2                       del_timer_sync
1726  *      3       mod_timer
1727  */
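/*
 * Concretely: bnad_dim_timer_start() sets BNAD_RF_DIM_TIMER_RUNNING and arms
 * the timer under bna_lock, bnad_dim_timeout() re-arms only while that bit is
 * still set, and bnad_destroy_rx() clears the bit under the lock before
 * calling del_timer_sync().
 */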
1728
1729 /* b) Dynamic Interrupt Moderation Timer */
1730 static void
1731 bnad_dim_timeout(unsigned long data)
1732 {
1733         struct bnad *bnad = (struct bnad *)data;
1734         struct bnad_rx_info *rx_info;
1735         struct bnad_rx_ctrl *rx_ctrl;
1736         int i, j;
1737         unsigned long flags;
1738
1739         if (!netif_carrier_ok(bnad->netdev))
1740                 return;
1741
1742         spin_lock_irqsave(&bnad->bna_lock, flags);
1743         for (i = 0; i < bnad->num_rx; i++) {
1744                 rx_info = &bnad->rx_info[i];
1745                 if (!rx_info->rx)
1746                         continue;
1747                 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1748                         rx_ctrl = &rx_info->rx_ctrl[j];
1749                         if (!rx_ctrl->ccb)
1750                                 continue;
1751                         bna_rx_dim_update(rx_ctrl->ccb);
1752                 }
1753         }
1754
1755         /* Re-check BNAD_RF_DIM_TIMER_RUNNING; this does not eliminate the race */
1756         if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1757                 mod_timer(&bnad->dim_timer,
1758                           jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1759         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1760 }
1761
1762 /* c)  Statistics Timer */
1763 static void
1764 bnad_stats_timeout(unsigned long data)
1765 {
1766         struct bnad *bnad = (struct bnad *)data;
1767         unsigned long flags;
1768
1769         if (!netif_running(bnad->netdev) ||
1770                 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1771                 return;
1772
1773         spin_lock_irqsave(&bnad->bna_lock, flags);
1774         bna_hw_stats_get(&bnad->bna);
1775         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1776 }
1777
1778 /*
1779  * Set up timer for DIM
1780  * Called with bnad->bna_lock held
1781  */
1782 void
1783 bnad_dim_timer_start(struct bnad *bnad)
1784 {
1785         if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1786             !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1787                 setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1788                             (unsigned long)bnad);
1789                 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1790                 mod_timer(&bnad->dim_timer,
1791                           jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1792         }
1793 }
1794
1795 /*
1796  * Set up timer for statistics
1797  * Called with mutex_lock(&bnad->conf_mutex) held
1798  */
1799 static void
1800 bnad_stats_timer_start(struct bnad *bnad)
1801 {
1802         unsigned long flags;
1803
1804         spin_lock_irqsave(&bnad->bna_lock, flags);
1805         if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1806                 setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1807                             (unsigned long)bnad);
1808                 mod_timer(&bnad->stats_timer,
1809                           jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1810         }
1811         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1812 }
1813
1814 /*
1815  * Stops the stats timer
1816  * Called with mutex_lock(&bnad->conf_mutex) held
1817  */
1818 static void
1819 bnad_stats_timer_stop(struct bnad *bnad)
1820 {
1821         int to_del = 0;
1822         unsigned long flags;
1823
1824         spin_lock_irqsave(&bnad->bna_lock, flags);
1825         if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1826                 to_del = 1;
1827         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1828         if (to_del)
1829                 del_timer_sync(&bnad->stats_timer);
1830 }
1831
1832 /* Utilities */
1833
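/*
 * Copies the netdev multicast list into mc_list starting at index 1; the
 * caller is expected to place the broadcast address in slot 0 and to size
 * mc_list for at least netdev_mc_count(netdev) + 1 entries.
 */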
1834 static void
1835 bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1836 {
1837         int i = 1; /* Index 0 has broadcast address */
1838         struct netdev_hw_addr *mc_addr;
1839
1840         netdev_for_each_mc_addr(mc_addr, netdev) {
1841                 memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
1842                                                         ETH_ALEN);
1843                 i++;
1844         }
1845 }
1846
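/*
 * NAPI poll handler: processes up to 'budget' completions from the CQ.
 * If the budget is exhausted, stay in polling mode; otherwise complete
 * NAPI and re-enable the Rx IRQ for this CCB.
 */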
1847 static int
1848 bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1849 {
1850         struct bnad_rx_ctrl *rx_ctrl =
1851                 container_of(napi, struct bnad_rx_ctrl, napi);
1852         struct bnad *bnad = rx_ctrl->bnad;
1853         int rcvd = 0;
1854
1855         rx_ctrl->rx_poll_ctr++;
1856
1857         if (!netif_carrier_ok(bnad->netdev))
1858                 goto poll_exit;
1859
1860         rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
1861         if (rcvd >= budget)
1862                 return rcvd;
1863
1864 poll_exit:
1865         napi_complete(napi);
1866
1867         rx_ctrl->rx_complete++;
1868
1869         if (rx_ctrl->ccb)
1870                 bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
1871
1872         return rcvd;
1873 }
1874
1875 #define BNAD_NAPI_POLL_QUOTA            64
1876 static void
1877 bnad_napi_add(struct bnad *bnad, u32 rx_id)
1878 {
1879         struct bnad_rx_ctrl *rx_ctrl;
1880         int i;
1881
1882         /* Initialize & enable NAPI */
1883         for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1884                 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1885                 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1886                                bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
1887         }
1888 }
1889
1890 static void
1891 bnad_napi_delete(struct bnad *bnad, u32 rx_id)
1892 {
1893         int i;
1894
1895         /* First disable and then clean up */
1896         for (i = 0; i < bnad->num_rxp_per_rx; i++)
1897                 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1898 }
1899
1900 /* Should be called with conf_lock held */
1901 void
1902 bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
1903 {
1904         struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1905         struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1906         unsigned long flags;
1907
1908         if (!tx_info->tx)
1909                 return;
1910
1911         init_completion(&bnad->bnad_completions.tx_comp);
1912         spin_lock_irqsave(&bnad->bna_lock, flags);
1913         bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1914         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1915         wait_for_completion(&bnad->bnad_completions.tx_comp);
1916
1917         if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1918                 bnad_tx_msix_unregister(bnad, tx_info,
1919                         bnad->num_txq_per_tx);
1920
1921         spin_lock_irqsave(&bnad->bna_lock, flags);
1922         bna_tx_destroy(tx_info->tx);
1923         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1924
1925         tx_info->tx = NULL;
1926         tx_info->tx_id = 0;
1927
1928         bnad_tx_res_free(bnad, res_info);
1929 }
1930
1931 /* Should be called with conf_lock held */
1932 int
1933 bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1934 {
1935         int err;
1936         struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1937         struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1938         struct bna_intr_info *intr_info =
1939                         &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1940         struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1941         static const struct bna_tx_event_cbfn tx_cbfn = {
1942                 .tcb_setup_cbfn = bnad_cb_tcb_setup,
1943                 .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
1944                 .tx_stall_cbfn = bnad_cb_tx_stall,
1945                 .tx_resume_cbfn = bnad_cb_tx_resume,
1946                 .tx_cleanup_cbfn = bnad_cb_tx_cleanup,
1947         };
1948
1949         struct bna_tx *tx;
1950         unsigned long flags;
1951
1952         tx_info->tx_id = tx_id;
1953
1954         /* Initialize the Tx object configuration */
1955         tx_config->num_txq = bnad->num_txq_per_tx;
1956         tx_config->txq_depth = bnad->txq_depth;
1957         tx_config->tx_type = BNA_TX_T_REGULAR;
1958         tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
1959
1960         /* Get BNA's resource requirement for one tx object */
1961         spin_lock_irqsave(&bnad->bna_lock, flags);
1962         bna_tx_res_req(bnad->num_txq_per_tx,
1963                 bnad->txq_depth, res_info);
1964         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1965
1966         /* Fill Unmap Q memory requirements */
1967         BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1968                         bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) *
1969                         bnad->txq_depth));
1970
1971         /* Allocate resources */
1972         err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1973         if (err)
1974                 return err;
1975
1976         /* Ask BNA to create one Tx object, supplying required resources */
1977         spin_lock_irqsave(&bnad->bna_lock, flags);
1978         tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1979                         tx_info);
1980         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1981         if (!tx)
1982                 goto err_return;
1983         tx_info->tx = tx;
1984
1985         INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
1986                         (work_func_t)bnad_tx_cleanup);
1987
1988         /* Register ISR for the Tx object */
1989         if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1990                 err = bnad_tx_msix_register(bnad, tx_info,
1991                         tx_id, bnad->num_txq_per_tx);
1992                 if (err)
1993                         goto err_return;
1994         }
1995
1996         spin_lock_irqsave(&bnad->bna_lock, flags);
1997         bna_tx_enable(tx);
1998         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1999
2000         return 0;
2001
2002 err_return:
2003         bnad_tx_res_free(bnad, res_info);
2004         return err;
2005 }
2006
2007 /* Setup the rx config for bna_rx_create */
2008 /* bnad decides the configuration */
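/*
 * RSS is enabled (with a random Toeplitz key) when more than one Rx path is
 * configured; multi-buffer Rx is used on CAT2 adapters when the frame size
 * exceeds 4096 bytes. The Rx path type defaults to BNA_RXP_SLR.
 */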
2009 static void
2010 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
2011 {
2012         memset(rx_config, 0, sizeof(*rx_config));
2013         rx_config->rx_type = BNA_RX_T_REGULAR;
2014         rx_config->num_paths = bnad->num_rxp_per_rx;
2015         rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
2016
2017         if (bnad->num_rxp_per_rx > 1) {
2018                 rx_config->rss_status = BNA_STATUS_T_ENABLED;
2019                 rx_config->rss_config.hash_type =
2020                                 (BFI_ENET_RSS_IPV6 |
2021                                  BFI_ENET_RSS_IPV6_TCP |
2022                                  BFI_ENET_RSS_IPV4 |
2023                                  BFI_ENET_RSS_IPV4_TCP);
2024                 rx_config->rss_config.hash_mask =
2025                                 bnad->num_rxp_per_rx - 1;
2026                 get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
2027                         sizeof(rx_config->rss_config.toeplitz_hash_key));
2028         } else {
2029                 rx_config->rss_status = BNA_STATUS_T_DISABLED;
2030                 memset(&rx_config->rss_config, 0,
2031                        sizeof(rx_config->rss_config));
2032         }
2033
2034         rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
2035         rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;
2036
2037         /* BNA_RXP_SINGLE - one data-buffer queue
2038          * BNA_RXP_SLR - one small-buffer and one large-buffer queue
2039          * BNA_RXP_HDS - one header-buffer and one data-buffer queue
2040          */
2041         /* TODO: configurable param for queue type */
2042         rx_config->rxp_type = BNA_RXP_SLR;
2043
2044         if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
2045             rx_config->frame_size > 4096) {
2046                 /* though size_routing_enable is set in SLR,
2047                  * small packets may get routed to same rxq.
2048                  * set buf_size to 2048 instead of PAGE_SIZE.
2049                  */
2050                 rx_config->q0_buf_size = 2048;
2051                 /* this should be in multiples of 2 */
2052                 rx_config->q0_num_vecs = 4;
2053                 rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
2054                 rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
2055         } else {
2056                 rx_config->q0_buf_size = rx_config->frame_size;
2057                 rx_config->q0_num_vecs = 1;
2058                 rx_config->q0_depth = bnad->rxq_depth;
2059         }
2060
2061         /* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */
2062         if (rx_config->rxp_type == BNA_RXP_SLR) {
2063                 rx_config->q1_depth = bnad->rxq_depth;
2064                 rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
2065         }
2066
2067         rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
2068 }
2069
2070 static void
2071 bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
2072 {
2073         struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2074         int i;
2075
2076         for (i = 0; i < bnad->num_rxp_per_rx; i++)
2077                 rx_info->rx_ctrl[i].bnad = bnad;
2078 }
2079
2080 /* Called with mutex_lock(&bnad->conf_mutex) held */
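/* Returns the number of Rx objects for which setup was attempted */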
2081 u32
2082 bnad_reinit_rx(struct bnad *bnad)
2083 {
2084         struct net_device *netdev = bnad->netdev;
2085         u32 err = 0, current_err = 0;
2086         u32 rx_id = 0, count = 0;
2087         unsigned long flags;
2088
2089         /* destroy and create new rx objects */
2090         for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2091                 if (!bnad->rx_info[rx_id].rx)
2092                         continue;
2093                 bnad_destroy_rx(bnad, rx_id);
2094         }
2095
2096         spin_lock_irqsave(&bnad->bna_lock, flags);
2097         bna_enet_mtu_set(&bnad->bna.enet,
2098                          BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2099         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2100
2101         for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2102                 count++;
2103                 current_err = bnad_setup_rx(bnad, rx_id);
2104                 if (current_err && !err) {
2105                         err = current_err;
2106                         pr_err("RXQ:%u setup failed\n", rx_id);
2107                 }
2108         }
2109
2110         /* restore rx configuration */
2111         if (bnad->rx_info[0].rx && !err) {
2112                 bnad_restore_vlans(bnad, 0);
2113                 bnad_enable_default_bcast(bnad);
2114                 spin_lock_irqsave(&bnad->bna_lock, flags);
2115                 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2116                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2117                 bnad_set_rx_mode(netdev);
2118         }
2119
2120         return count;
2121 }
2122
2123 /* Called with bnad_conf_lock() held */
2124 void
2125 bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
2126 {
2127         struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2128         struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2129         struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2130         unsigned long flags;
2131         int to_del = 0;
2132
2133         if (!rx_info->rx)
2134                 return;
2135
2136         if (0 == rx_id) {
2137                 spin_lock_irqsave(&bnad->bna_lock, flags);
2138                 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
2139                     test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
2140                         clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
2141                         to_del = 1;
2142                 }
2143                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2144                 if (to_del)
2145                         del_timer_sync(&bnad->dim_timer);
2146         }
2147
2148         init_completion(&bnad->bnad_completions.rx_comp);
2149         spin_lock_irqsave(&bnad->bna_lock, flags);
2150         bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
2151         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2152         wait_for_completion(&bnad->bnad_completions.rx_comp);
2153
2154         if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
2155                 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
2156
2157         bnad_napi_delete(bnad, rx_id);
2158
2159         spin_lock_irqsave(&bnad->bna_lock, flags);
2160         bna_rx_destroy(rx_info->rx);
2161
2162         rx_info->rx = NULL;
2163         rx_info->rx_id = 0;
2164         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2165
2166         bnad_rx_res_free(bnad, res_info);
2167 }
2168
2169 /* Called with mutex_lock(&bnad->conf_mutex) held */
2170 int
2171 bnad_setup_rx(struct bnad *bnad, u32 rx_id)
2172 {
2173         int err;
2174         struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2175         struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2176         struct bna_intr_info *intr_info =
2177                         &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2178         struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2179         static const struct bna_rx_event_cbfn rx_cbfn = {
2180                 .rcb_setup_cbfn = NULL,
2181                 .rcb_destroy_cbfn = NULL,
2182                 .ccb_setup_cbfn = bnad_cb_ccb_setup,
2183                 .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
2184                 .rx_stall_cbfn = bnad_cb_rx_stall,
2185                 .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
2186                 .rx_post_cbfn = bnad_cb_rx_post,
2187         };
2188         struct bna_rx *rx;
2189         unsigned long flags;
2190
2191         rx_info->rx_id = rx_id;
2192
2193         /* Initialize the Rx object configuration */
2194         bnad_init_rx_config(bnad, rx_config);
2195
2196         /* Get BNA's resource requirement for one Rx object */
2197         spin_lock_irqsave(&bnad->bna_lock, flags);
2198         bna_rx_res_req(rx_config, res_info);
2199         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2200
2201         /* Fill Unmap Q memory requirements */
2202         BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
2203                                  rx_config->num_paths,
2204                         (rx_config->q0_depth *
2205                          sizeof(struct bnad_rx_unmap)) +
2206                          sizeof(struct bnad_rx_unmap_q));
2207
2208         if (rx_config->rxp_type != BNA_RXP_SINGLE) {
2209                 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
2210                                          rx_config->num_paths,
2211                                 (rx_config->q1_depth *
2212                                  sizeof(struct bnad_rx_unmap) +
2213                                  sizeof(struct bnad_rx_unmap_q)));
2214         }
2215         /* Allocate resource */
2216         err = bnad_rx_res_alloc(bnad, res_info, rx_id);
2217         if (err)
2218                 return err;
2219
2220         bnad_rx_ctrl_init(bnad, rx_id);
2221
2222         /* Ask BNA to create one Rx object, supplying required resources */
2223         spin_lock_irqsave(&bnad->bna_lock, flags);
2224         rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
2225                         rx_info);
2226         if (!rx) {
2227                 err = -ENOMEM;
2228                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2229                 goto err_return;
2230         }
2231         rx_info->rx = rx;
2232         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2233
2234         INIT_WORK(&rx_info->rx_cleanup_work,
2235                         (work_func_t)(bnad_rx_cleanup));
2236
2237         /*
2238          * Init NAPI, so that its state is set to NAPI_STATE_SCHED and
2239          * the IRQ handler cannot schedule NAPI at this point.
2240          */
2241         bnad_napi_add(bnad, rx_id);
2242
2243         /* Register ISR for the Rx object */
2244         if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2245                 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
2246                                                 rx_config->num_paths);
2247                 if (err)
2248                         goto err_return;
2249         }
2250
2251         spin_lock_irqsave(&bnad->bna_lock, flags);
2252         if (0 == rx_id) {
2253                 /* Set up Dynamic Interrupt Moderation Vector */
2254                 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
2255                         bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
2256
2257                 /* Enable VLAN filtering only on the default Rx */
2258                 bna_rx_vlanfilter_enable(rx);
2259
2260                 /* Start the DIM timer */
2261                 bnad_dim_timer_start(bnad);
2262         }
2263
2264         bna_rx_enable(rx);
2265         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2266
2267         return 0;
2268
2269 err_return:
2270         bnad_destroy_rx(bnad, rx_id);
2271         return err;
2272 }
2273
2274 /* Called with conf_lock & bnad->bna_lock held */
2275 void
2276 bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2277 {
2278         struct bnad_tx_info *tx_info;
2279
2280         tx_info = &bnad->tx_info[0];
2281         if (!tx_info->tx)
2282                 return;
2283
2284         bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2285 }
2286
2287 /* Called with conf_lock & bnad->bna_lock held */
2288 void
2289 bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2290 {
2291         struct bnad_rx_info *rx_info;
2292         int     i;
2293
2294         for (i = 0; i < bnad->num_rx; i++) {
2295                 rx_info = &bnad->rx_info[i];
2296                 if (!rx_info->rx)
2297                         continue;
2298                 bna_rx_coalescing_timeo_set(rx_info->rx,
2299                                 bnad->rx_coalescing_timeo);
2300         }
2301 }
2302
2303 /*
2304  * Called with bnad->bna_lock held
2305  */
2306 int
2307 bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
2308 {
2309         int ret;
2310
2311         if (!is_valid_ether_addr(mac_addr))
2312                 return -EADDRNOTAVAIL;
2313
2314         /* If datapath is down, pretend everything went through */
2315         if (!bnad->rx_info[0].rx)
2316                 return 0;
2317
2318         ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
2319         if (ret != BNA_CB_SUCCESS)
2320                 return -EADDRNOTAVAIL;
2321
2322         return 0;
2323 }
2324
2325 /* Should be called with conf_lock held */
2326 int
2327 bnad_enable_default_bcast(struct bnad *bnad)
2328 {
2329         struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2330         int ret;
2331         unsigned long flags;
2332
2333         init_completion(&bnad->bnad_completions.mcast_comp);
2334
2335         spin_lock_irqsave(&bnad->bna_lock, flags);
2336         ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
2337                                 bnad_cb_rx_mcast_add);
2338         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2339
2340         if (ret == BNA_CB_SUCCESS)
2341                 wait_for_completion(&bnad->bnad_completions.mcast_comp);
2342         else
2343                 return -ENODEV;
2344
2345         if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2346                 return -ENODEV;
2347
2348         return 0;
2349 }
2350
2351 /* Called with mutex_lock(&bnad->conf_mutex) held */
2352 void
2353 bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2354 {
2355         u16 vid;
2356         unsigned long flags;
2357
2358         for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
2359                 spin_lock_irqsave(&bnad->bna_lock, flags);
2360                 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
2361                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2362         }
2363 }
2364
2365 /* Statistics utilities */
2366 void
2367 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2368 {
2369         int i, j;
2370
2371         for (i = 0; i < bnad->num_rx; i++) {
2372                 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2373                         if (bnad->rx_info[i].rx_ctrl[j].ccb) {
2374                                 stats->rx_packets += bnad->rx_info[i].
2375                                 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
2376                                 stats->rx_bytes += bnad->rx_info[i].
2377                                         rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2378                                 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2379                                         bnad->rx_info[i].rx_ctrl[j].ccb->
2380                                         rcb[1]->rxq) {
2381                                         stats->rx_packets +=
2382                                                 bnad->rx_info[i].rx_ctrl[j].
2383                                                 ccb->rcb[1]->rxq->rx_packets;
2384                                         stats->rx_bytes +=
2385                                                 bnad->rx_info[i].rx_ctrl[j].
2386                                                 ccb->rcb[1]->rxq->rx_bytes;
2387                                 }
2388                         }
2389                 }
2390         }
2391         for (i = 0; i < bnad->num_tx; i++) {
2392                 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2393                         if (bnad->tx_info[i].tcb[j]) {
2394                                 stats->tx_packets +=
2395                                 bnad->tx_info[i].tcb[j]->txq->tx_packets;
2396                                 stats->tx_bytes +=
2397                                         bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2398                         }
2399                 }
2400         }
2401 }
2402
2403 /*
2404  * Must be called with the bna_lock held.
2405  */
2406 void
2407 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2408 {
2409         struct bfi_enet_stats_mac *mac_stats;
2410         u32 bmap;
2411         int i;
2412
2413         mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
2414         stats->rx_errors =
2415                 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2416                 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2417                 mac_stats->rx_undersize;
2418         stats->tx_errors = mac_stats->tx_fcs_error +
2419                                         mac_stats->tx_undersize;
2420         stats->rx_dropped = mac_stats->rx_drop;
2421         stats->tx_dropped = mac_stats->tx_drop;
2422         stats->multicast = mac_stats->rx_multicast;
2423         stats->collisions = mac_stats->tx_total_collision;
2424
2425         stats->rx_length_errors = mac_stats->rx_frame_length_error;
2426
2427         /* receive ring buffer overflow  ?? */
2428
2429         stats->rx_crc_errors = mac_stats->rx_fcs_error;
2430         stats->rx_frame_errors = mac_stats->rx_alignment_error;
2431         /* receiver FIFO overrun: count frame drops of the first active RxF */
2432         bmap = bna_rx_rid_mask(&bnad->bna);
2433         for (i = 0; bmap; i++) {
2434                 if (bmap & 1) {
2435                         stats->rx_fifo_errors +=
2436                                 bnad->stats.bna_stats->
2437                                         hw_stats.rxf_stats[i].frame_drops;
2438                         break;
2439                 }
2440                 bmap >>= 1;
2441         }
2442 }
2443
2444 static void
2445 bnad_mbox_irq_sync(struct bnad *bnad)
2446 {
2447         u32 irq;
2448         unsigned long flags;
2449
2450         spin_lock_irqsave(&bnad->bna_lock, flags);
2451         if (bnad->cfg_flags & BNAD_CF_MSIX)
2452                 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
2453         else
2454                 irq = bnad->pcidev->irq;
2455         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2456
2457         synchronize_irq(irq);
2458 }
2459
2460 /* Utility used by bnad_start_xmit for TSO */
2461 static int
2462 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2463 {
2464         int err;
2465
2466         if (skb_header_cloned(skb)) {
2467                 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2468                 if (err) {
2469                         BNAD_UPDATE_CTR(bnad, tso_err);
2470                         return err;
2471                 }
2472         }
2473
2474         /*
2475          * For TSO, the TCP checksum field is seeded with pseudo-header sum
2476          * excluding the length field.
2477          */
2478         if (skb->protocol == htons(ETH_P_IP)) {
2479                 struct iphdr *iph = ip_hdr(skb);
2480
2481                 /* Do we really need these? */
2482                 iph->tot_len = 0;
2483                 iph->check = 0;
2484
2485                 tcp_hdr(skb)->check =
2486                         ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2487                                            IPPROTO_TCP, 0);
2488                 BNAD_UPDATE_CTR(bnad, tso4);
2489         } else {
2490                 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2491
2492                 ipv6h->payload_len = 0;
2493                 tcp_hdr(skb)->check =
2494                         ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2495                                          IPPROTO_TCP, 0);
2496                 BNAD_UPDATE_CTR(bnad, tso6);
2497         }
2498
2499         return 0;
2500 }
2501
2502 /*
2503  * Initialize Q numbers depending on Rx Paths
2504  * Called with bnad->bna_lock held, because of cfg_flags
2505  * access.
2506  */
2507 static void
2508 bnad_q_num_init(struct bnad *bnad)
2509 {
2510         int rxps;
2511
2512         rxps = min((uint)num_online_cpus(),
2513                         (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
2514
2515         if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2516                 rxps = 1;       /* INTx */
2517
2518         bnad->num_rx = 1;
2519         bnad->num_tx = 1;
2520         bnad->num_rxp_per_rx = rxps;
2521         bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2522 }
2523
2524 /*
2525  * Adjusts the Q numbers, given a number of MSI-X vectors.
2526  * Gives preference to RSS over Tx priority queues; in that case,
2527  * just use 1 Tx Q.
2528  * Called with bnad->bna_lock held because of cfg_flags access.
2529  */
2530 static void
2531 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
2532 {
2533         bnad->num_txq_per_tx = 1;
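        /*
         * With a single TxQ, hand every MSI-X vector left over after Tx and
         * the mailbox to Rx paths (RSS); otherwise fall back to one Rx path.
         */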
2534         if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx)  +
2535              bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2536             (bnad->cfg_flags & BNAD_CF_MSIX)) {
2537                 bnad->num_rxp_per_rx = msix_vectors -
2538                         (bnad->num_tx * bnad->num_txq_per_tx) -
2539                         BNAD_MAILBOX_MSIX_VECTORS;
2540         } else
2541                 bnad->num_rxp_per_rx = 1;
2542 }
2543
2544 /* Enable / disable ioceth */
2545 static int
2546 bnad_ioceth_disable(struct bnad *bnad)
2547 {
2548         unsigned long flags;
2549         int err = 0;
2550
2551         spin_lock_irqsave(&bnad->bna_lock, flags);
2552         init_completion(&bnad->bnad_completions.ioc_comp);
2553         bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
2554         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2555
2556         wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2557                 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2558
2559         err = bnad->bnad_completions.ioc_comp_status;
2560         return err;
2561 }
2562
2563 static int
2564 bnad_ioceth_enable(struct bnad *bnad)
2565 {
2566         int err = 0;
2567         unsigned long flags;
2568
2569         spin_lock_irqsave(&bnad->bna_lock, flags);
2570         init_completion(&bnad->bnad_completions.ioc_comp);
2571         bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2572         bna_ioceth_enable(&bnad->bna.ioceth);
2573         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2574
2575         wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2576                 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2577
2578         err = bnad->bnad_completions.ioc_comp_status;
2579
2580         return err;
2581 }
2582
2583 /* Free BNA resources */
2584 static void
2585 bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2586                 u32 res_val_max)
2587 {
2588         int i;
2589
2590         for (i = 0; i < res_val_max; i++)
2591                 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2592 }
2593
2594 /* Allocates memory and interrupt resources for BNA */
2595 static int
2596 bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2597                 u32 res_val_max)
2598 {
2599         int i, err;
2600
2601         for (i = 0; i < res_val_max; i++) {
2602                 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2603                 if (err)
2604                         goto err_return;
2605         }
2606         return 0;
2607
2608 err_return:
2609         bnad_res_free(bnad, res_info, res_val_max);
2610         return err;
2611 }
2612
2613 /* Interrupt enable / disable */
2614 static void
2615 bnad_enable_msix(struct bnad *bnad)
2616 {
2617         int i, ret;
2618         unsigned long flags;
2619
2620         spin_lock_irqsave(&bnad->bna_lock, flags);
2621         if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2622                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2623                 return;
2624         }
2625         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2626
2627         if (bnad->msix_table)
2628                 return;
2629
2630         bnad->msix_table =
2631                 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2632
2633         if (!bnad->msix_table)
2634                 goto intx_mode;
2635
2636         for (i = 0; i < bnad->msix_num; i++)
2637                 bnad->msix_table[i].entry = i;
2638
2639         ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
2640         if (ret > 0) {
2641                 /* Not enough MSI-X vectors. */
2642                 pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
2643                         ret, bnad->msix_num);
2644
2645                 spin_lock_irqsave(&bnad->bna_lock, flags);
2646                 /* ret = #of vectors that we got */
2647                 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2648                         (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
2649                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2650
2651                 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
2652                          BNAD_MAILBOX_MSIX_VECTORS;
2653
2654                 if (bnad->msix_num > ret)
2655                         goto intx_mode;
2656
2657                 /* Try once more with adjusted numbers */
2658                 /* If this fails, fall back to INTx */
2659                 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
2660                                       bnad->msix_num);
2661                 if (ret)
2662                         goto intx_mode;
2663
2664         } else if (ret < 0)
2665                 goto intx_mode;
2666
2667         pci_intx(bnad->pcidev, 0);
2668
2669         return;
2670
2671 intx_mode:
2672         pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");
2673
2674         kfree(bnad->msix_table);
2675         bnad->msix_table = NULL;
2676         bnad->msix_num = 0;
2677         spin_lock_irqsave(&bnad->bna_lock, flags);
2678         bnad->cfg_flags &= ~BNAD_CF_MSIX;
2679         bnad_q_num_init(bnad);
2680         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2681 }
2682
2683 static void
2684 bnad_disable_msix(struct bnad *bnad)
2685 {
2686         u32 cfg_flags;
2687         unsigned long flags;
2688
2689         spin_lock_irqsave(&bnad->bna_lock, flags);
2690         cfg_flags = bnad->cfg_flags;
2691         if (bnad->cfg_flags & BNAD_CF_MSIX)
2692                 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2693         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2694
2695         if (cfg_flags & BNAD_CF_MSIX) {
2696                 pci_disable_msix(bnad->pcidev);
2697                 kfree(bnad->msix_table);
2698                 bnad->msix_table = NULL;
2699         }
2700 }
2701
2702 /* Netdev entry points */
2703 static int
2704 bnad_open(struct net_device *netdev)
2705 {
2706         int err;
2707         struct bnad *bnad = netdev_priv(netdev);
2708         struct bna_pause_config pause_config;
2709         unsigned long flags;
2710
2711         mutex_lock(&bnad->conf_mutex);
2712
2713         /* Tx */
2714         err = bnad_setup_tx(bnad, 0);
2715         if (err)
2716                 goto err_return;
2717
2718         /* Rx */
2719         err = bnad_setup_rx(bnad, 0);
2720         if (err)
2721                 goto cleanup_tx;
2722
2723         /* Port */
2724         pause_config.tx_pause = 0;
2725         pause_config.rx_pause = 0;
2726
2727         spin_lock_irqsave(&bnad->bna_lock, flags);
2728         bna_enet_mtu_set(&bnad->bna.enet,
2729                          BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2730         bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
2731         bna_enet_enable(&bnad->bna.enet);
2732         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2733
2734         /* Enable broadcast */
2735         bnad_enable_default_bcast(bnad);
2736
2737         /* Restore VLANs, if any */
2738         bnad_restore_vlans(bnad, 0);
2739
2740         /* Set the UCAST address */
2741         spin_lock_irqsave(&bnad->bna_lock, flags);
2742         bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2743         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2744
2745         /* Start the stats timer */
2746         bnad_stats_timer_start(bnad);
2747
2748         mutex_unlock(&bnad->conf_mutex);
2749
2750         return 0;
2751
2752 cleanup_tx:
2753         bnad_destroy_tx(bnad, 0);
2754
2755 err_return:
2756         mutex_unlock(&bnad->conf_mutex);
2757         return err;
2758 }
2759
2760 static int
2761 bnad_stop(struct net_device *netdev)
2762 {
2763         struct bnad *bnad = netdev_priv(netdev);
2764         unsigned long flags;
2765
2766         mutex_lock(&bnad->conf_mutex);
2767
2768         /* Stop the stats timer */
2769         bnad_stats_timer_stop(bnad);
2770
2771         init_completion(&bnad->bnad_completions.enet_comp);
2772
2773         spin_lock_irqsave(&bnad->bna_lock, flags);
2774         bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2775                         bnad_cb_enet_disabled);
2776         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2777
2778         wait_for_completion(&bnad->bnad_completions.enet_comp);
2779
2780         bnad_destroy_tx(bnad, 0);
2781         bnad_destroy_rx(bnad, 0);
2782
2783         /* Synchronize mailbox IRQ */
2784         bnad_mbox_irq_sync(bnad);
2785
2786         mutex_unlock(&bnad->conf_mutex);
2787
2788         return 0;
2789 }
2790
2791 /* TX */
2792 /* Returns 0 for success */
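/*
 * Fills the Tx work-item header from the skb: VLAN tag insertion, LSO
 * opcode and MSS for GSO frames, and L4 checksum-offload flags; returns
 * -EINVAL for frames the hardware cannot handle.
 */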
2793 static int
2794 bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2795                     struct sk_buff *skb, struct bna_txq_entry *txqent)
2796 {
2797         u16 flags = 0;
2798         u32 gso_size;
2799         u16 vlan_tag = 0;
2800
2801         if (vlan_tx_tag_present(skb)) {
2802                 vlan_tag = (u16)vlan_tx_tag_get(skb);
2803                 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2804         }
2805         if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2806                 vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT)
2807                                 | (vlan_tag & 0x1fff);
2808                 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2809         }
2810         txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2811
2812         if (skb_is_gso(skb)) {
2813                 gso_size = skb_shinfo(skb)->gso_size;
2814                 if (unlikely(gso_size > bnad->netdev->mtu)) {
2815                         BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2816                         return -EINVAL;
2817                 }
2818                 if (unlikely((gso_size + skb_transport_offset(skb) +
2819                               tcp_hdrlen(skb)) >= skb->len)) {
2820                         txqent->hdr.wi.opcode =
2821                                 __constant_htons(BNA_TXQ_WI_SEND);
2822                         txqent->hdr.wi.lso_mss = 0;
2823                         BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2824                 } else {
2825                         txqent->hdr.wi.opcode =
2826                                 __constant_htons(BNA_TXQ_WI_SEND_LSO);
2827                         txqent->hdr.wi.lso_mss = htons(gso_size);
2828                 }
2829
2830                 if (bnad_tso_prepare(bnad, skb)) {
2831                         BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2832                         return -EINVAL;
2833                 }
2834
2835                 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2836                 txqent->hdr.wi.l4_hdr_size_n_offset =
2837                         htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
2838                         tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
2839         } else  {
2840                 txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND);
2841                 txqent->hdr.wi.lso_mss = 0;
2842
2843                 if (unlikely(skb->len > (bnad->netdev->mtu + ETH_HLEN))) {
2844                         BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2845                         return -EINVAL;
2846                 }
2847
2848                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2849                         u8 proto = 0;
2850
2851                         if (skb->protocol == __constant_htons(ETH_P_IP))
2852                                 proto = ip_hdr(skb)->protocol;
2853 #ifdef NETIF_F_IPV6_CSUM
2854                         else if (skb->protocol ==
2855                                  __constant_htons(ETH_P_IPV6)) {
2856                                 /* nexthdr may not be TCP immediately. */
2857                                 proto = ipv6_hdr(skb)->nexthdr;
2858                         }
2859 #endif
2860                         if (proto == IPPROTO_TCP) {
2861                                 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2862                                 txqent->hdr.wi.l4_hdr_size_n_offset =
2863                                         htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2864                                               (0, skb_transport_offset(skb)));
2865
2866                                 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2867
2868                                 if (unlikely(skb_headlen(skb) <
2869                                             skb_transport_offset(skb) +
2870                                     tcp_hdrlen(skb))) {
2871                                         BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2872                                         return -EINVAL;
2873                                 }
2874                         } else if (proto == IPPROTO_UDP) {
2875                                 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2876                                 txqent->hdr.wi.l4_hdr_size_n_offset =
2877                                         htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2878                                               (0, skb_transport_offset(skb)));
2879
2880                                 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2881                                 if (unlikely(skb_headlen(skb) <
2882                                             skb_transport_offset(skb) +
2883                                     sizeof(struct udphdr))) {
2884                                         BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2885                                         return -EINVAL;
2886                                 }
2887                         } else {
2888
2889                                 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
2890                                 return -EINVAL;
2891                         }
2892                 } else
2893                         txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2894         }
2895
2896         txqent->hdr.wi.flags = htons(flags);
2897         txqent->hdr.wi.frame_length = htonl(skb->len);
2898
2899         return 0;
2900 }
2901
2902 /*
2903  * bnad_start_xmit : Netdev entry point for Transmit
2904  *                   Called under lock held by net_device
2905  */
2906 static netdev_tx_t
2907 bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2908 {
2909         struct bnad *bnad = netdev_priv(netdev);
2910         u32 txq_id = 0;
2911         struct bna_tcb *tcb = NULL;
2912         struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap;
2913         u32             prod, q_depth, vect_id;
2914         u32             wis, vectors, len;
2915         int             i;
2916         dma_addr_t              dma_addr;
2917         struct bna_txq_entry *txqent;
2918
2919         len = skb_headlen(skb);
2920
2921         /* Sanity checks for the skb */
2922
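        /*
         * Drop the frame if it is no longer than the Ethernet header, if the
         * linear part exceeds BFI_TX_MAX_DATA_PER_VECTOR, or if the linear
         * part is empty.
         */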
2923         if (unlikely(skb->len <= ETH_HLEN)) {
2924                 dev_kfree_skb(skb);
2925                 BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2926                 return NETDEV_TX_OK;
2927         }
2928         if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
2929                 dev_kfree_skb(skb);
2930                 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2931                 return NETDEV_TX_OK;
2932         }
2933         if (unlikely(len == 0)) {
2934                 dev_kfree_skb(skb);
2935                 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2936                 return NETDEV_TX_OK;
2937         }
2938
2939         tcb = bnad->tx_info[0].tcb[txq_id];
2940         q_depth = tcb->q_depth;
2941         prod = tcb->producer_index;
2942
2943         unmap_q = tcb->unmap_q;
2944
2945         /*
2946          * Takes care of the Tx that is scheduled between clearing the flag
2947          * and the netif_tx_stop_all_queues() call.
2948          */
2949         if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2950                 dev_kfree_skb(skb);
2951                 BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2952                 return NETDEV_TX_OK;
2953         }
2954
2955         vectors = 1 + skb_shinfo(skb)->nr_frags;
2956         wis = BNA_TXQ_WI_NEEDED(vectors);       /* 4 vectors per work item */
2957
2958         if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
2959                 dev_kfree_skb(skb);
2960                 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2961                 return NETDEV_TX_OK;
2962         }
2963
2964         /* Check for available TxQ resources */
2965         if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2966                 if ((*tcb->hw_consumer_index != tcb->consumer_index) &&
2967                     !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2968                         u32 sent;
2969                         sent = bnad_txcmpl_process(bnad, tcb);
2970                         if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2971                                 bna_ib_ack(tcb->i_dbell, sent);
2972                         smp_mb__before_clear_bit();
2973                         clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2974                 } else {
2975                         netif_stop_queue(netdev);
2976                         BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2977                 }
2978
2979                 smp_mb();
2980                 /*
2981                  * Check again to deal with race condition between
2982                  * netif_stop_queue here, and netif_wake_queue in
2983                  * interrupt handler which is not inside netif tx lock.
2984                  */
2985                 if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2986                         BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2987                         return NETDEV_TX_BUSY;
2988                 } else {
2989                         netif_wake_queue(netdev);
2990                         BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2991                 }
2992         }
2993
2994         txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
2995         head_unmap = &unmap_q[prod];
2996
2997         /* Program the opcode, flags, frame_len, num_vectors in WI */
2998         if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
2999                 dev_kfree_skb(skb);
3000                 return NETDEV_TX_OK;
3001         }
3002         txqent->hdr.wi.reserved = 0;
3003         txqent->hdr.wi.num_vectors = vectors;
3004
3005         head_unmap->skb = skb;
3006         head_unmap->nvecs = 0;
3007
3008         /* Program the vectors */
3009         unmap = head_unmap;
3010         dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
3011                                   len, DMA_TO_DEVICE);
3012         BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
3013         txqent->vector[0].length = htons(len);
3014         dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
3015         head_unmap->nvecs++;
3016
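        /*
         * Map each page fragment; a fresh work item with the WI_EXTENSION
         * opcode is started every BFI_TX_MAX_VECTORS_PER_WI vectors.
         */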
3017         for (i = 0, vect_id = 0; i < vectors - 1; i++) {
3018                 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
3019                 u16             size = skb_frag_size(frag);
3020
3021                 if (unlikely(size == 0)) {
3022                         /* Undo the changes starting at tcb->producer_index */
3023                         bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3024                                 tcb->producer_index);
3025                         dev_kfree_skb(skb);
3026                         BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
3027                         return NETDEV_TX_OK;
3028                 }
3029
3030                 len += size;
3031
3032                 vect_id++;
3033                 if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
3034                         vect_id = 0;
3035                         BNA_QE_INDX_INC(prod, q_depth);
3036                         txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3037                         txqent->hdr.wi_ext.opcode =
3038                                 __constant_htons(BNA_TXQ_WI_EXTENSION);
3039                         unmap = &unmap_q[prod];
3040                 }
3041
3042                 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
3043                                             0, size, DMA_TO_DEVICE);
3044                 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
3045                 txqent->vector[vect_id].length = htons(size);
3046                 dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
3047                                                 dma_addr);
3048                 head_unmap->nvecs++;
3049         }
3050
3051         if (unlikely(len != skb->len)) {
3052                 /* Undo the changes starting at tcb->producer_index */
3053                 bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
3054                 dev_kfree_skb(skb);
3055                 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
3056                 return NETDEV_TX_OK;
3057         }
3058
3059         BNA_QE_INDX_INC(prod, q_depth);
3060         tcb->producer_index = prod;
3061
3062         smp_mb();
3063
3064         if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
3065                 return NETDEV_TX_OK;
3066
3067         skb_tx_timestamp(skb);
3068
3069         bna_txq_prod_indx_doorbell(tcb);
3070         smp_mb();
3071
3072         return NETDEV_TX_OK;
3073 }
3074
3075 /*
3076  * Uses spin_lock to synchronize reads of the stats structures, which
3077  * are written by BNA under the same lock.
3078  */
3079 static struct rtnl_link_stats64 *
3080 bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
3081 {
3082         struct bnad *bnad = netdev_priv(netdev);
3083         unsigned long flags;
3084
3085         spin_lock_irqsave(&bnad->bna_lock, flags);
3086
3087         bnad_netdev_qstats_fill(bnad, stats);
3088         bnad_netdev_hwstats_fill(bnad, stats);
3089
3090         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3091
3092         return stats;
3093 }
3094
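/*
 * Program the netdev unicast address list into the Rx path.  If the list
 * exceeds the adapter's UCAM capacity or the temporary list allocation
 * fails, fall back to default-function mode so unmatched unicast frames
 * are still delivered.  Called from bnad_set_rx_mode() with bna_lock held,
 * hence the GFP_ATOMIC allocation.
 */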
3095 static void
3096 bnad_set_rx_ucast_fltr(struct bnad *bnad)
3097 {
3098         struct net_device *netdev = bnad->netdev;
3099         int uc_count = netdev_uc_count(netdev);
3100         enum bna_cb_status ret;
3101         u8 *mac_list;
3102         struct netdev_hw_addr *ha;
3103         int entry;
3104
3105         if (netdev_uc_empty(bnad->netdev)) {
3106                 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
3107                 return;
3108         }
3109
3110         if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
3111                 goto mode_default;
3112
3113         mac_list = kzalloc(uc_count * ETH_ALEN, GFP_ATOMIC);
3114         if (mac_list == NULL)
3115                 goto mode_default;
3116
3117         entry = 0;
3118         netdev_for_each_uc_addr(ha, netdev) {
3119                 memcpy(&mac_list[entry * ETH_ALEN],
3120                        &ha->addr[0], ETH_ALEN);
3121                 entry++;
3122         }
3123
3124         ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry,
3125                         mac_list, NULL);
3126         kfree(mac_list);
3127
3128         if (ret != BNA_CB_SUCCESS)
3129                 goto mode_default;
3130
3131         return;
3132
3133         /* ucast packets not in UCAM are routed to default function */
3134 mode_default:
3135         bnad->cfg_flags |= BNAD_CF_DEFAULT;
3136         bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
3137 }
3138
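/*
 * Program the netdev multicast list into the Rx path.  Entry 0 is always
 * the broadcast address; if IFF_ALLMULTI is set, the list would overflow
 * the adapter's MCAM, or the allocation fails, fall back to ALLMULTI mode
 * and delete the per-address filters.  Called from bnad_set_rx_mode()
 * with bna_lock held, hence the GFP_ATOMIC allocation.
 */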
3139 static void
3140 bnad_set_rx_mcast_fltr(struct bnad *bnad)
3141 {
3142         struct net_device *netdev = bnad->netdev;
3143         int mc_count = netdev_mc_count(netdev);
3144         enum bna_cb_status ret;
3145         u8 *mac_list;
3146
3147         if (netdev->flags & IFF_ALLMULTI)
3148                 goto mode_allmulti;
3149
3150         if (netdev_mc_empty(netdev))
3151                 return;
3152
3153         if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
3154                 goto mode_allmulti;
3155
3156         mac_list = kzalloc((mc_count + 1) * ETH_ALEN, GFP_ATOMIC);
3157
3158         if (mac_list == NULL)
3159                 goto mode_allmulti;
3160
3161         memcpy(&mac_list[0], &bnad_bcast_addr[0], ETH_ALEN);
3162
3163         /* copy rest of the MCAST addresses */
3164         bnad_netdev_mc_list_get(netdev, mac_list);
3165         ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
3166                         mac_list, NULL);
3167         kfree(mac_list);
3168
3169         if (ret != BNA_CB_SUCCESS)
3170                 goto mode_allmulti;
3171
3172         return;
3173
3174 mode_allmulti:
3175         bnad->cfg_flags |= BNAD_CF_ALLMULTI;
3176         bna_rx_mcast_delall(bnad->rx_info[0].rx, NULL);
3177 }
3178
3179 void
3180 bnad_set_rx_mode(struct net_device *netdev)
3181 {
3182         struct bnad *bnad = netdev_priv(netdev);
3183         enum bna_rxmode new_mode, mode_mask;
3184         unsigned long flags;
3185
3186         spin_lock_irqsave(&bnad->bna_lock, flags);
3187
3188         if (bnad->rx_info[0].rx == NULL) {
3189                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3190                 return;
3191         }
3192
3193         /* clear bnad flags to update it with new settings */
3194         bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
3195                         BNAD_CF_ALLMULTI);
3196
3197         new_mode = 0;
3198         if (netdev->flags & IFF_PROMISC) {
3199                 new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
3200                 bnad->cfg_flags |= BNAD_CF_PROMISC;
3201         } else {
3202                 bnad_set_rx_mcast_fltr(bnad);
3203
3204                 if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
3205                         new_mode |= BNA_RXMODE_ALLMULTI;
3206
3207                 bnad_set_rx_ucast_fltr(bnad);
3208
3209                 if (bnad->cfg_flags & BNAD_CF_DEFAULT)
3210                         new_mode |= BNA_RXMODE_DEFAULT;
3211         }
3212
3213         mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
3214                         BNA_RXMODE_ALLMULTI;
3215         bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL);
3216
3217         if (bnad->cfg_flags & BNAD_CF_PROMISC)
3218                 bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
3219         else
3220                 bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
3221
3222         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3223 }
3224
3225 /*
3226  * bna_lock is used to sync writes to netdev->dev_addr;
3227  * conf_mutex cannot be used since this call may be made
3228  * in a non-blocking (atomic) context.
3229  */
3230 static int
3231 bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
3232 {
3233         int err;
3234         struct bnad *bnad = netdev_priv(netdev);
3235         struct sockaddr *sa = (struct sockaddr *)mac_addr;
3236         unsigned long flags;
3237
3238         spin_lock_irqsave(&bnad->bna_lock, flags);
3239
3240         err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
3241
3242         if (!err)
3243                 memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
3244
3245         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3246
3247         return err;
3248 }
3249
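/*
 * Push a new frame size to the enet block and wait for the firmware
 * completion.  Sleeps on mtu_comp, so this must not be called from atomic
 * context; the caller serializes MTU changes with conf_mutex.
 */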
3250 static int
3251 bnad_mtu_set(struct bnad *bnad, int frame_size)
3252 {
3253         unsigned long flags;
3254
3255         init_completion(&bnad->bnad_completions.mtu_comp);
3256
3257         spin_lock_irqsave(&bnad->bna_lock, flags);
3258         bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
3259         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3260
3261         wait_for_completion(&bnad->bnad_completions.mtu_comp);
3262
3263         return bnad->bnad_completions.mtu_comp_status;
3264 }
3265
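/*
 * .ndo_change_mtu handler.  Validates the requested MTU, re-creates the
 * Rx path when the frame size crosses the 4K boundary on adapters that
 * support multi-buffer Rx (BNAD_PCI_DEV_IS_CAT2), then pushes the new
 * frame size to the hardware via bnad_mtu_set().
 */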
3266 static int
3267 bnad_change_mtu(struct net_device *netdev, int new_mtu)
3268 {
3269         int err, mtu;
3270         struct bnad *bnad = netdev_priv(netdev);
3271         u32 rx_count = 0, frame, new_frame;
3272
3273         if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
3274                 return -EINVAL;
3275
3276         mutex_lock(&bnad->conf_mutex);
3277
3278         mtu = netdev->mtu;
3279         netdev->mtu = new_mtu;
3280
3281         frame = BNAD_FRAME_SIZE(mtu);
3282         new_frame = BNAD_FRAME_SIZE(new_mtu);
3283
3284         /* check if multi-buffer needs to be enabled */
3285         if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
3286             netif_running(bnad->netdev)) {
3287                 /* only when transition is over 4K */
3288                 if ((frame <= 4096 && new_frame > 4096) ||
3289                     (frame > 4096 && new_frame <= 4096))
3290                         rx_count = bnad_reinit_rx(bnad);
3291         }
3292
3293         /* rx_count > 0 - a new Rx was created by bnad_reinit_rx();
3294          * Linux expects err = 0 to be returned in that case.
3295          */
3296         err = bnad_mtu_set(bnad, new_frame);
3297         if (err)
3298                 err = -EBUSY;
3299
3300         mutex_unlock(&bnad->conf_mutex);
3301         return err;
3302 }
3303
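/*
 * .ndo_vlan_rx_add_vid / .ndo_vlan_rx_kill_vid handlers: add or remove a
 * VLAN ID in the hardware Rx filter and mirror the change in the driver's
 * active_vlans bitmap, under both conf_mutex and bna_lock.
 */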
3304 static int
3305 bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3306 {
3307         struct bnad *bnad = netdev_priv(netdev);
3308         unsigned long flags;
3309
3310         if (!bnad->rx_info[0].rx)
3311                 return 0;
3312
3313         mutex_lock(&bnad->conf_mutex);
3314
3315         spin_lock_irqsave(&bnad->bna_lock, flags);
3316         bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
3317         set_bit(vid, bnad->active_vlans);
3318         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3319
3320         mutex_unlock(&bnad->conf_mutex);
3321
3322         return 0;
3323 }
3324
3325 static int
3326 bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3327 {
3328         struct bnad *bnad = netdev_priv(netdev);
3329         unsigned long flags;
3330
3331         if (!bnad->rx_info[0].rx)
3332                 return 0;
3333
3334         mutex_lock(&bnad->conf_mutex);
3335
3336         spin_lock_irqsave(&bnad->bna_lock, flags);
3337         clear_bit(vid, bnad->active_vlans);
3338         bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3339         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3340
3341         mutex_unlock(&bnad->conf_mutex);
3342
3343         return 0;
3344 }
3345
3346 #ifdef CONFIG_NET_POLL_CONTROLLER
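/*
 * Netpoll entry point.  In INTx mode the device interrupt is masked and
 * the ISR is invoked directly; in MSI-X mode only the Rx paths are polled,
 * since Tx completions are already reaped in the transmit path.
 */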
3347 static void
3348 bnad_netpoll(struct net_device *netdev)
3349 {
3350         struct bnad *bnad = netdev_priv(netdev);
3351         struct bnad_rx_info *rx_info;
3352         struct bnad_rx_ctrl *rx_ctrl;
3353         u32 curr_mask;
3354         int i, j;
3355
3356         if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3357                 bna_intx_disable(&bnad->bna, curr_mask);
3358                 bnad_isr(bnad->pcidev->irq, netdev);
3359                 bna_intx_enable(&bnad->bna, curr_mask);
3360         } else {
3361                 /*
3362                  * Tx processing may happen in sending context, so no need
3363                  * to explicitly process completions here
3364                  */
3365
3366                 /* Rx processing */
3367                 for (i = 0; i < bnad->num_rx; i++) {
3368                         rx_info = &bnad->rx_info[i];
3369                         if (!rx_info->rx)
3370                                 continue;
3371                         for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3372                                 rx_ctrl = &rx_info->rx_ctrl[j];
3373                                 if (rx_ctrl->ccb)
3374                                         bnad_netif_rx_schedule_poll(bnad,
3375                                                             rx_ctrl->ccb);
3376                         }
3377                 }
3378         }
3379 }
3380 #endif
3381
3382 static const struct net_device_ops bnad_netdev_ops = {
3383         .ndo_open               = bnad_open,
3384         .ndo_stop               = bnad_stop,
3385         .ndo_start_xmit         = bnad_start_xmit,
3386         .ndo_get_stats64        = bnad_get_stats64,
3387         .ndo_set_rx_mode        = bnad_set_rx_mode,
3388         .ndo_validate_addr      = eth_validate_addr,
3389         .ndo_set_mac_address    = bnad_set_mac_address,
3390         .ndo_change_mtu         = bnad_change_mtu,
3391         .ndo_vlan_rx_add_vid    = bnad_vlan_rx_add_vid,
3392         .ndo_vlan_rx_kill_vid   = bnad_vlan_rx_kill_vid,
3393 #ifdef CONFIG_NET_POLL_CONTROLLER
3394         .ndo_poll_controller    = bnad_netpoll
3395 #endif
3396 };
3397
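/*
 * Populate netdev features (scatter/gather, checksum offload, TSO, VLAN
 * tag insertion/stripping and filtering, HIGHDMA when 64-bit DMA is in
 * use), record the BAR0 MMIO range, and hook up the netdev and ethtool
 * operations.
 */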
3398 static void
3399 bnad_netdev_init(struct bnad *bnad, bool using_dac)
3400 {
3401         struct net_device *netdev = bnad->netdev;
3402
3403         netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3404                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3405                 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX;
3406
3407         netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3408                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3409                 NETIF_F_TSO | NETIF_F_TSO6;
3410
3411         netdev->features |= netdev->hw_features |
3412                 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
3413
3414         if (using_dac)
3415                 netdev->features |= NETIF_F_HIGHDMA;
3416
3417         netdev->mem_start = bnad->mmio_start;
3418         netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3419
3420         netdev->netdev_ops = &bnad_netdev_ops;
3421         bnad_set_ethtool_ops(netdev);
3422 }
3423
3424 /*
3425  * 1. Initialize the bnad structure
3426  * 2. Set up the netdev pointer in pci_dev
3427  * 3. Initialize the number of TxQs, CQs & MSI-X vectors
3428  * 4. Initialize the work queue.
3429  */
3430 static int
3431 bnad_init(struct bnad *bnad,
3432           struct pci_dev *pdev, struct net_device *netdev)
3433 {
3434         unsigned long flags;
3435
3436         SET_NETDEV_DEV(netdev, &pdev->dev);
3437         pci_set_drvdata(pdev, netdev);
3438
3439         bnad->netdev = netdev;
3440         bnad->pcidev = pdev;
3441         bnad->mmio_start = pci_resource_start(pdev, 0);
3442         bnad->mmio_len = pci_resource_len(pdev, 0);
3443         bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
3444         if (!bnad->bar0) {
3445                 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3446                 return -ENOMEM;
3447         }
3448         pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
3449                (unsigned long long) bnad->mmio_len);
3450
3451         spin_lock_irqsave(&bnad->bna_lock, flags);
3452         if (!bnad_msix_disable)
3453                 bnad->cfg_flags = BNAD_CF_MSIX;
3454
3455         bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3456
3457         bnad_q_num_init(bnad);
3458         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3459
3460         bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3461                 (bnad->num_rx * bnad->num_rxp_per_rx) +
3462                          BNAD_MAILBOX_MSIX_VECTORS;
3463
3464         bnad->txq_depth = BNAD_TXQ_DEPTH;
3465         bnad->rxq_depth = BNAD_RXQ_DEPTH;
3466
3467         bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3468         bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3469
3470         sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
3471         bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
3472         if (!bnad->work_q) {
3473                 iounmap(bnad->bar0);
3474                 return -ENOMEM;
3475         }
3476
3477         return 0;
3478 }
3479
3480 /*
3481  * Must be called after bnad_pci_uninit()
3482  * so that iounmap() and pci_set_drvdata(NULL)
3483  * happen only after PCI uninitialization.
3484  */
3485 static void
3486 bnad_uninit(struct bnad *bnad)
3487 {
3488         if (bnad->work_q) {
3489                 flush_workqueue(bnad->work_q);
3490                 destroy_workqueue(bnad->work_q);
3491                 bnad->work_q = NULL;
3492         }
3493
3494         if (bnad->bar0)
3495                 iounmap(bnad->bar0);
3496 }
3497
3498 /*
3499  * Initialize locks
3500  *      a) Per-ioceth mutex used for serializing configuration
3501  *         changes from the OS interface
3502  *      b) spin lock used to protect the bna state machine
3503  */
3504 static void
3505 bnad_lock_init(struct bnad *bnad)
3506 {
3507         spin_lock_init(&bnad->bna_lock);
3508         mutex_init(&bnad->conf_mutex);
3509         mutex_init(&bnad_list_mutex);
3510 }
3511
3512 static void
3513 bnad_lock_uninit(struct bnad *bnad)
3514 {
3515         mutex_destroy(&bnad->conf_mutex);
3516         mutex_destroy(&bnad_list_mutex);
3517 }
3518
3519 /* PCI Initialization */
3520 static int
3521 bnad_pci_init(struct bnad *bnad,
3522               struct pci_dev *pdev, bool *using_dac)
3523 {
3524         int err;
3525
3526         err = pci_enable_device(pdev);
3527         if (err)
3528                 return err;
3529         err = pci_request_regions(pdev, BNAD_NAME);
3530         if (err)
3531                 goto disable_device;
3532         if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
3533                 *using_dac = true;
3534         } else {
3535                 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3536                 if (err)
3537                         goto release_regions;
3538                 *using_dac = false;
3539         }
3540         pci_set_master(pdev);
3541         return 0;
3542
3543 release_regions:
3544         pci_release_regions(pdev);
3545 disable_device:
3546         pci_disable_device(pdev);
3547
3548         return err;
3549 }
3550
3551 static void
3552 bnad_pci_uninit(struct pci_dev *pdev)
3553 {
3554         pci_release_regions(pdev);
3555         pci_disable_device(pdev);
3556 }
3557
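/*
 * Probe sequence: load the firmware image, allocate the net_device/bnad
 * pair, enable the PCI device and set the DMA mask, map BAR0, allocate
 * BNA resources, set up MSI-X and the mailbox IRQ, start the IOC timers,
 * enable the IOC, negotiate TxQ/RxP counts, fetch the burnt-in MAC and
 * finally register the net_device.
 */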
3558 static int
3559 bnad_pci_probe(struct pci_dev *pdev,
3560                 const struct pci_device_id *pcidev_id)
3561 {
3562         bool    using_dac;
3563         int     err;
3564         struct bnad *bnad;
3565         struct bna *bna;
3566         struct net_device *netdev;
3567         struct bfa_pcidev pcidev_info;
3568         unsigned long flags;
3569
3570         pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
3571                pdev, pcidev_id, PCI_FUNC(pdev->devfn));
3572
3573         mutex_lock(&bnad_fwimg_mutex);
3574         if (!cna_get_firmware_buf(pdev)) {
3575                 mutex_unlock(&bnad_fwimg_mutex);
3576                 pr_warn("Failed to load Firmware Image!\n");
3577                 return -ENODEV;
3578         }
3579         mutex_unlock(&bnad_fwimg_mutex);
3580
3581         /*
3582          * Allocates sizeof(struct net_device) + sizeof(struct bnad);
3583          * bnad = netdev_priv(netdev)
3584          */
3585         netdev = alloc_etherdev(sizeof(struct bnad));
3586         if (!netdev) {
3587                 err = -ENOMEM;
3588                 return err;
3589         }
3590         bnad = netdev_priv(netdev);
3591         bnad_lock_init(bnad);
3592         bnad_add_to_list(bnad);
3593
3594         mutex_lock(&bnad->conf_mutex);
3595         /*
3596          * PCI initialization
3597          *      Output : using_dac = 1 for 64 bit DMA
3598          *                         = 0 for 32 bit DMA
3599          */
3600         using_dac = false;
3601         err = bnad_pci_init(bnad, pdev, &using_dac);
3602         if (err)
3603                 goto unlock_mutex;
3604
3605         /*
3606          * Initialize bnad structure
3607          * Setup relation between pci_dev & netdev
3608          */
3609         err = bnad_init(bnad, pdev, netdev);
3610         if (err)
3611                 goto pci_uninit;
3612
3613         /* Initialize netdev structure, set up ethtool ops */
3614         bnad_netdev_init(bnad, using_dac);
3615
3616         /* Set link to down state */
3617         netif_carrier_off(netdev);
3618
3619         /* Set up the debugfs node for this bnad */
3620         if (bna_debugfs_enable)
3621                 bnad_debugfs_init(bnad);
3622
3623         /* Get resource requirements from bna */
3624         spin_lock_irqsave(&bnad->bna_lock, flags);
3625         bna_res_req(&bnad->res_info[0]);
3626         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3627
3628         /* Allocate resources from bna */
3629         err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3630         if (err)
3631                 goto drv_uninit;
3632
3633         bna = &bnad->bna;
3634
3635         /* Setup pcidev_info for bna_init() */
3636         pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3637         pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3638         pcidev_info.device_id = bnad->pcidev->device;
3639         pcidev_info.pci_bar_kva = bnad->bar0;
3640
3641         spin_lock_irqsave(&bnad->bna_lock, flags);
3642         bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3643         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3644
3645         bnad->stats.bna_stats = &bna->stats;
3646
3647         bnad_enable_msix(bnad);
3648         err = bnad_mbox_irq_alloc(bnad);
3649         if (err)
3650                 goto res_free;
3651
3652         /* Set up timers */
3653         setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
3654                                 ((unsigned long)bnad));
3655         setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
3656                                 ((unsigned long)bnad));
3657         setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
3658                                 ((unsigned long)bnad));
3659         setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3660                                 ((unsigned long)bnad));
3661
3662         /* Now start the timer before calling IOC */
3663         mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
3664                   jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3665
3666         /*
3667          * Start the chip
3668          * If the call back comes with error, we bail out.
3669          * This is a catastrophic error.
3670          */
3671         err = bnad_ioceth_enable(bnad);
3672         if (err) {
3673                 pr_err("BNA: Initialization failed err=%d\n",
3674                        err);
3675                 goto probe_success;
3676         }
3677
3678         spin_lock_irqsave(&bnad->bna_lock, flags);
3679         if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3680                 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3681                 bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3682                         bna_attr(bna)->num_rxp - 1);
3683                 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3684                         bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3685                         err = -EIO;
3686         }
3687         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3688         if (err)
3689                 goto disable_ioceth;
3690
3691         spin_lock_irqsave(&bnad->bna_lock, flags);
3692         bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3693         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3694
3695         err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3696         if (err) {
3697                 err = -EIO;
3698                 goto disable_ioceth;
3699         }
3700
3701         spin_lock_irqsave(&bnad->bna_lock, flags);
3702         bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3703         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3704
3705         /* Get the burnt-in mac */
3706         spin_lock_irqsave(&bnad->bna_lock, flags);
3707         bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
3708         bnad_set_netdev_perm_addr(bnad);
3709         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3710
3711         mutex_unlock(&bnad->conf_mutex);
3712
3713         /* Finally, register with the net_device layer */
3714         err = register_netdev(netdev);
3715         if (err) {
3716                 pr_err("BNA : Registering with netdev failed\n");
3717                 goto probe_uninit;
3718         }
3719         set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
3720
3721         return 0;
3722
3723 probe_success:
3724         mutex_unlock(&bnad->conf_mutex);
3725         return 0;
3726
3727 probe_uninit:
3728         mutex_lock(&bnad->conf_mutex);
3729         bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3730 disable_ioceth:
3731         bnad_ioceth_disable(bnad);
3732         del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3733         del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3734         del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3735         spin_lock_irqsave(&bnad->bna_lock, flags);
3736         bna_uninit(bna);
3737         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3738         bnad_mbox_irq_free(bnad);
3739         bnad_disable_msix(bnad);
3740 res_free:
3741         bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3742 drv_uninit:
3743         /* Remove the debugfs node for this bnad */
3744         kfree(bnad->regdata);
3745         bnad_debugfs_uninit(bnad);
3746         bnad_uninit(bnad);
3747 pci_uninit:
3748         bnad_pci_uninit(pdev);
3749 unlock_mutex:
3750         mutex_unlock(&bnad->conf_mutex);
3751         bnad_remove_from_list(bnad);
3752         bnad_lock_uninit(bnad);
3753         free_netdev(netdev);
3754         return err;
3755 }
3756
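/*
 * Remove path: unregister the net_device, disable the IOC and delete its
 * timers, uninitialize BNA, free module and driver resources, release the
 * mailbox IRQ and MSI-X vectors, and finally unmap BAR0 and free the
 * net_device.
 */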
3757 static void
3758 bnad_pci_remove(struct pci_dev *pdev)
3759 {
3760         struct net_device *netdev = pci_get_drvdata(pdev);
3761         struct bnad *bnad;
3762         struct bna *bna;
3763         unsigned long flags;
3764
3765         if (!netdev)
3766                 return;
3767
3768         pr_info("%s bnad_pci_remove\n", netdev->name);
3769         bnad = netdev_priv(netdev);
3770         bna = &bnad->bna;
3771
3772         if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3773                 unregister_netdev(netdev);
3774
3775         mutex_lock(&bnad->conf_mutex);
3776         bnad_ioceth_disable(bnad);
3777         del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3778         del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3779         del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3780         spin_lock_irqsave(&bnad->bna_lock, flags);
3781         bna_uninit(bna);
3782         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3783
3784         bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3785         bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3786         bnad_mbox_irq_free(bnad);
3787         bnad_disable_msix(bnad);
3788         bnad_pci_uninit(pdev);
3789         mutex_unlock(&bnad->conf_mutex);
3790         bnad_remove_from_list(bnad);
3791         bnad_lock_uninit(bnad);
3792         /* Remove the debugfs node for this bnad */
3793         kfree(bnad->regdata);
3794         bnad_debugfs_uninit(bnad);
3795         bnad_uninit(bnad);
3796         free_netdev(netdev);
3797 }
3798
3799 static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
3800         {
3801                 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3802                         PCI_DEVICE_ID_BROCADE_CT),
3803                 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3804                 .class_mask =  0xffff00
3805         },
3806         {
3807                 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3808                         BFA_PCI_DEVICE_ID_CT2),
3809                 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3810                 .class_mask =  0xffff00
3811         },
3812         {0,  },
3813 };
3814
3815 MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3816
3817 static struct pci_driver bnad_pci_driver = {
3818         .name = BNAD_NAME,
3819         .id_table = bnad_pci_id_table,
3820         .probe = bnad_pci_probe,
3821         .remove = bnad_pci_remove,
3822 };
3823
3824 static int __init
3825 bnad_module_init(void)
3826 {
3827         int err;
3828
3829         pr_info("Brocade 10G Ethernet driver - version: %s\n",
3830                         BNAD_VERSION);
3831
3832         bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3833
3834         err = pci_register_driver(&bnad_pci_driver);
3835         if (err < 0) {
3836                 pr_err("bna : PCI registration failed in module init "
3837                        "(%d)\n", err);
3838                 return err;
3839         }
3840
3841         return 0;
3842 }
3843
3844 static void __exit
3845 bnad_module_exit(void)
3846 {
3847         pci_unregister_driver(&bnad_pci_driver);
3848         release_firmware(bfi_fw);
3849 }
3850
3851 module_init(bnad_module_init);
3852 module_exit(bnad_module_exit);
3853
3854 MODULE_AUTHOR("Brocade");
3855 MODULE_LICENSE("GPL");
3856 MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
3857 MODULE_VERSION(BNAD_VERSION);
3858 MODULE_FIRMWARE(CNA_FW_FILE_CT);
3859 MODULE_FIRMWARE(CNA_FW_FILE_CT2);