net: Fix too optimistic NETIF_F_HW_CSUM features
[firefly-linux-kernel-4.4.55.git] / drivers/net/benet/be_main.c
1 /*
2  * Copyright (C) 2005 - 2010 ServerEngines
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@serverengines.com
12  *
13  * ServerEngines
14  * 209 N. Fair Oaks Ave
15  * Sunnyvale, CA 94085
16  */
17
18 #include "be.h"
19 #include "be_cmds.h"
20 #include <asm/div64.h>
21
22 MODULE_VERSION(DRV_VER);
24 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
25 MODULE_AUTHOR("ServerEngines Corporation");
26 MODULE_LICENSE("GPL");
27
28 static unsigned int rx_frag_size = 2048;
29 static unsigned int num_vfs;
30 module_param(rx_frag_size, uint, S_IRUGO);
31 module_param(num_vfs, uint, S_IRUGO);
32 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
33 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
34
35 static bool multi_rxq = true;
36 module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
37 MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");
38
39 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
40         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
41         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
42         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
44         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
45         { 0 }
46 };
47 MODULE_DEVICE_TABLE(pci, be_dev_ids);
48 /* UE Status Low CSR */
49 static char *ue_status_low_desc[] = {
50         "CEV",
51         "CTX",
52         "DBUF",
53         "ERX",
54         "Host",
55         "MPU",
56         "NDMA",
57         "PTC ",
58         "RDMA ",
59         "RXF ",
60         "RXIPS ",
61         "RXULP0 ",
62         "RXULP1 ",
63         "RXULP2 ",
64         "TIM ",
65         "TPOST ",
66         "TPRE ",
67         "TXIPS ",
68         "TXULP0 ",
69         "TXULP1 ",
70         "UC ",
71         "WDMA ",
72         "TXULP2 ",
73         "HOST1 ",
74         "P0_OB_LINK ",
75         "P1_OB_LINK ",
76         "HOST_GPIO ",
77         "MBOX ",
78         "AXGMAC0",
79         "AXGMAC1",
80         "JTAG",
81         "MPU_INTPEND"
82 };
83 /* UE Status High CSR */
84 static char *ue_status_hi_desc[] = {
85         "LPCMEMHOST",
86         "MGMT_MAC",
87         "PCS0ONLINE",
88         "MPU_IRAM",
89         "PCS1ONLINE",
90         "PCTL0",
91         "PCTL1",
92         "PMEM",
93         "RR",
94         "TXPB",
95         "RXPP",
96         "XAUI",
97         "TXP",
98         "ARM",
99         "IPC",
100         "HOST2",
101         "HOST3",
102         "HOST4",
103         "HOST5",
104         "HOST6",
105         "HOST7",
106         "HOST8",
107         "HOST9",
108         "NETC",
109         "Unknown",
110         "Unknown",
111         "Unknown",
112         "Unknown",
113         "Unknown",
114         "Unknown",
115         "Unknown",
116         "Unknown"
117 };
118
119 static inline bool be_multi_rxq(struct be_adapter *adapter)
120 {
121         return (adapter->num_rx_qs > 1);
122 }
123
124 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
125 {
126         struct be_dma_mem *mem = &q->dma_mem;
127         if (mem->va)
128                 pci_free_consistent(adapter->pdev, mem->size,
129                         mem->va, mem->dma);
130 }
131
132 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
133                 u16 len, u16 entry_size)
134 {
135         struct be_dma_mem *mem = &q->dma_mem;
136
137         memset(q, 0, sizeof(*q));
138         q->len = len;
139         q->entry_size = entry_size;
140         mem->size = len * entry_size;
141         mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
142         if (!mem->va)
143                 return -1;
144         memset(mem->va, 0, mem->size);
145         return 0;
146 }
147
148 static void be_intr_set(struct be_adapter *adapter, bool enable)
149 {
150         u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
151         u32 reg = ioread32(addr);
152         u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
153
154         if (adapter->eeh_err)
155                 return;
156
157         if (!enabled && enable)
158                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
159         else if (enabled && !enable)
160                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
161         else
162                 return;
163
164         iowrite32(reg, addr);
165 }
166
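/*
 * Doorbell helpers (a descriptive note, not in the original source): each
 * 32-bit write packs a ring id and a count (buffers posted, or events/
 * completions processed) into one register. For the RX and TX queue
 * doorbells below, wmb() orders the descriptor updates in memory ahead of
 * the MMIO write that makes them visible to the device.
 */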
167 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
168 {
169         u32 val = 0;
170         val |= qid & DB_RQ_RING_ID_MASK;
171         val |= posted << DB_RQ_NUM_POSTED_SHIFT;
172
173         wmb();
174         iowrite32(val, adapter->db + DB_RQ_OFFSET);
175 }
176
177 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
178 {
179         u32 val = 0;
180         val |= qid & DB_TXULP_RING_ID_MASK;
181         val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
182
183         wmb();
184         iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
185 }
186
187 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
188                 bool arm, bool clear_int, u16 num_popped)
189 {
190         u32 val = 0;
191         val |= qid & DB_EQ_RING_ID_MASK;
192         val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
193                         DB_EQ_RING_ID_EXT_MASK_SHIFT);
194
195         if (adapter->eeh_err)
196                 return;
197
198         if (arm)
199                 val |= 1 << DB_EQ_REARM_SHIFT;
200         if (clear_int)
201                 val |= 1 << DB_EQ_CLR_SHIFT;
202         val |= 1 << DB_EQ_EVNT_SHIFT;
203         val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
204         iowrite32(val, adapter->db + DB_EQ_OFFSET);
205 }
206
207 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
208 {
209         u32 val = 0;
210         val |= qid & DB_CQ_RING_ID_MASK;
211         val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
212                         DB_CQ_RING_ID_EXT_MASK_SHIFT);
213
214         if (adapter->eeh_err)
215                 return;
216
217         if (arm)
218                 val |= 1 << DB_CQ_REARM_SHIFT;
219         val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
220         iowrite32(val, adapter->db + DB_CQ_OFFSET);
221 }
222
223 static int be_mac_addr_set(struct net_device *netdev, void *p)
224 {
225         struct be_adapter *adapter = netdev_priv(netdev);
226         struct sockaddr *addr = p;
227         int status = 0;
228
229         if (!is_valid_ether_addr(addr->sa_data))
230                 return -EADDRNOTAVAIL;
231
232         /* MAC addr configuration will be done in hardware for VFs
233          * by their corresponding PFs. Just copy to netdev addr here
234          */
235         if (!be_physfn(adapter))
236                 goto netdev_addr;
237
238         status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
239         if (status)
240                 return status;
241
242         status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
243                         adapter->if_handle, &adapter->pmac_id);
244 netdev_addr:
245         if (!status)
246                 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
247
248         return status;
249 }
250
251 void netdev_stats_update(struct be_adapter *adapter)
252 {
253         struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
254         struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
255         struct be_port_rxf_stats *port_stats =
256                         &rxf_stats->port[adapter->port_num];
257         struct net_device_stats *dev_stats = &adapter->netdev->stats;
258         struct be_erx_stats *erx_stats = &hw_stats->erx;
259         struct be_rx_obj *rxo;
260         int i;
261
262         memset(dev_stats, 0, sizeof(*dev_stats));
263         for_all_rx_queues(adapter, rxo, i) {
264                 dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
265                 dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
266                 dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
267                 /*  no space in linux buffers: best possible approximation */
268                 dev_stats->rx_dropped +=
269                         erx_stats->rx_drops_no_fragments[rxo->q.id];
270         }
271
272         dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
273         dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;
274
275         /* bad pkts received */
276         dev_stats->rx_errors = port_stats->rx_crc_errors +
277                 port_stats->rx_alignment_symbol_errors +
278                 port_stats->rx_in_range_errors +
279                 port_stats->rx_out_range_errors +
280                 port_stats->rx_frame_too_long +
281                 port_stats->rx_dropped_too_small +
282                 port_stats->rx_dropped_too_short +
283                 port_stats->rx_dropped_header_too_small +
284                 port_stats->rx_dropped_tcp_length +
285                 port_stats->rx_dropped_runt +
286                 port_stats->rx_tcp_checksum_errs +
287                 port_stats->rx_ip_checksum_errs +
288                 port_stats->rx_udp_checksum_errs;
289
290         /* detailed rx errors */
291         dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
292                 port_stats->rx_out_range_errors +
293                 port_stats->rx_frame_too_long;
294
295         dev_stats->rx_crc_errors = port_stats->rx_crc_errors;
296
297         /* frame alignment errors */
298         dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;
299
300         /* receiver fifo overrun */
301         /* drops_no_pbuf is not per i/f, it's per BE card */
302         dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
303                                         port_stats->rx_input_fifo_overflow +
304                                         rxf_stats->rx_drops_no_pbuf;
305 }
306
307 void be_link_status_update(struct be_adapter *adapter, bool link_up)
308 {
309         struct net_device *netdev = adapter->netdev;
310
311         /* If link came up or went down */
312         if (adapter->link_up != link_up) {
313                 adapter->link_speed = -1;
314                 if (link_up) {
315                         netif_start_queue(netdev);
316                         netif_carrier_on(netdev);
317                         printk(KERN_INFO "%s: Link up\n", netdev->name);
318                 } else {
319                         netif_stop_queue(netdev);
320                         netif_carrier_off(netdev);
321                         printk(KERN_INFO "%s: Link down\n", netdev->name);
322                 }
323                 adapter->link_up = link_up;
324         }
325 }
326
327 /* Update the EQ delay on BE based on the RX frags consumed per sec */
328 static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
329 {
330         struct be_eq_obj *rx_eq = &rxo->rx_eq;
331         struct be_rx_stats *stats = &rxo->stats;
332         ulong now = jiffies;
333         u32 eqd;
334
335         if (!rx_eq->enable_aic)
336                 return;
337
338         /* Wrapped around */
339         if (time_before(now, stats->rx_fps_jiffies)) {
340                 stats->rx_fps_jiffies = now;
341                 return;
342         }
343
344         /* Update once a second */
345         if ((now - stats->rx_fps_jiffies) < HZ)
346                 return;
347
348         stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
349                         ((now - stats->rx_fps_jiffies) / HZ);
350
351         stats->rx_fps_jiffies = now;
352         stats->prev_rx_frags = stats->rx_frags;
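        /* Note (added): heuristic below scales the interrupt delay with
         * the rx frag rate, roughly eqd = (frags/sec / 110000) * 8,
         * clamped to [min_eqd, max_eqd]; at low rates the delay is
         * disabled entirely (eqd = 0).
         */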
353         eqd = stats->rx_fps / 110000;
354         eqd = eqd << 3;
355         if (eqd > rx_eq->max_eqd)
356                 eqd = rx_eq->max_eqd;
357         if (eqd < rx_eq->min_eqd)
358                 eqd = rx_eq->min_eqd;
359         if (eqd < 10)
360                 eqd = 0;
361         if (eqd != rx_eq->cur_eqd)
362                 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
363
364         rx_eq->cur_eqd = eqd;
365 }
366
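/*
 * Convert a byte count accumulated over 'ticks' jiffies to Mbits/sec.
 * Worked example (added; assuming HZ=1000): 250,000,000 bytes over 2000
 * ticks -> 125,000,000 bytes/sec -> 1,000,000,000 bits/sec -> 1000 Mbps.
 */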
367 static u32 be_calc_rate(u64 bytes, unsigned long ticks)
368 {
369         u64 rate = bytes;
370
371         do_div(rate, ticks / HZ);
372         rate <<= 3;                     /* bytes/sec -> bits/sec */
373         do_div(rate, 1000000ul);        /* bits/sec -> Mbits/sec */
374
375         return rate;
376 }
377
378 static void be_tx_rate_update(struct be_adapter *adapter)
379 {
380         struct be_tx_stats *stats = tx_stats(adapter);
381         ulong now = jiffies;
382
383         /* Wrapped around? */
384         if (time_before(now, stats->be_tx_jiffies)) {
385                 stats->be_tx_jiffies = now;
386                 return;
387         }
388
389         /* Update tx rate once in two seconds */
390         if ((now - stats->be_tx_jiffies) > 2 * HZ) {
391                 stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
392                                                   - stats->be_tx_bytes_prev,
393                                                  now - stats->be_tx_jiffies);
394                 stats->be_tx_jiffies = now;
395                 stats->be_tx_bytes_prev = stats->be_tx_bytes;
396         }
397 }
398
399 static void be_tx_stats_update(struct be_adapter *adapter,
400                         u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
401 {
402         struct be_tx_stats *stats = tx_stats(adapter);
403         stats->be_tx_reqs++;
404         stats->be_tx_wrbs += wrb_cnt;
405         stats->be_tx_bytes += copied;
406         stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
407         if (stopped)
408                 stats->be_tx_stops++;
409 }
410
411 /* Determine number of WRB entries needed to xmit data in an skb */
412 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
413                                                                 bool *dummy)
414 {
415         int cnt = (skb->len > skb->data_len);
416
417         cnt += skb_shinfo(skb)->nr_frags;
418
419         /* to account for hdr wrb */
420         cnt++;
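        /* Note (added): non-Lancer chips pad the request to an even
         * number of WRBs with a zero-length dummy WRB, presumably a
         * hardware requirement; Lancer chips take an odd count as-is.
         */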
421         if (lancer_chip(adapter) || !(cnt & 1)) {
422                 *dummy = false;
423         } else {
424                 /* add a dummy to make it an even num */
425                 cnt++;
426                 *dummy = true;
427         }
428         BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
429         return cnt;
430 }
431
432 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
433 {
434         wrb->frag_pa_hi = upper_32_bits(addr);
435         wrb->frag_pa_lo = addr & 0xFFFFFFFF;
436         wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
437 }
438
439 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
440                 struct sk_buff *skb, u32 wrb_cnt, u32 len)
441 {
442         u8 vlan_prio = 0;
443         u16 vlan_tag = 0;
444
445         memset(hdr, 0, sizeof(*hdr));
446
447         AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
448
449         if (skb_is_gso(skb)) {
450                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
451                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
452                         hdr, skb_shinfo(skb)->gso_size);
453                 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
454                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
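                /* Note (added): Lancer A0 workaround; the assumption is
                 * that A0 does not insert checksums on LSO frames by
                 * itself, so IP and L4 checksum insertion are requested
                 * explicitly in the header WRB.
                 */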
455                 if (lancer_chip(adapter) && adapter->sli_family  ==
456                                                         LANCER_A0_SLI_FAMILY) {
457                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
458                         if (is_tcp_pkt(skb))
459                                 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
460                                                                 tcpcs, hdr, 1);
461                         else if (is_udp_pkt(skb))
462                                 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
463                                                                 udpcs, hdr, 1);
464                 }
465         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
466                 if (is_tcp_pkt(skb))
467                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
468                 else if (is_udp_pkt(skb))
469                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
470         }
471
472         if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
473                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
474                 vlan_tag = vlan_tx_tag_get(skb);
475                 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
476                 /* If vlan priority provided by OS is NOT in available bmap */
477                 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
478                         vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
479                                         adapter->recommended_prio;
480                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
481         }
482
483         AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
484         AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
485         AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
486         AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
487 }
488
489 static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
490                 bool unmap_single)
491 {
492         dma_addr_t dma;
493
494         be_dws_le_to_cpu(wrb, sizeof(*wrb));
495
496         dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
497         if (wrb->frag_len) {
498                 if (unmap_single)
499                         pci_unmap_single(pdev, dma, wrb->frag_len,
500                                 PCI_DMA_TODEVICE);
501                 else
502                         pci_unmap_page(pdev, dma, wrb->frag_len,
503                                 PCI_DMA_TODEVICE);
504         }
505 }
506
507 static int make_tx_wrbs(struct be_adapter *adapter,
508                 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
509 {
510         dma_addr_t busaddr;
511         int i, copied = 0;
512         struct pci_dev *pdev = adapter->pdev;
513         struct sk_buff *first_skb = skb;
514         struct be_queue_info *txq = &adapter->tx_obj.q;
515         struct be_eth_wrb *wrb;
516         struct be_eth_hdr_wrb *hdr;
517         bool map_single = false;
518         u16 map_head;
519
520         hdr = queue_head_node(txq);
521         queue_head_inc(txq);
522         map_head = txq->head;
523
524         if (skb->len > skb->data_len) {
525                 int len = skb_headlen(skb);
526                 busaddr = pci_map_single(pdev, skb->data, len,
527                                          PCI_DMA_TODEVICE);
528                 if (pci_dma_mapping_error(pdev, busaddr))
529                         goto dma_err;
530                 map_single = true;
531                 wrb = queue_head_node(txq);
532                 wrb_fill(wrb, busaddr, len);
533                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
534                 queue_head_inc(txq);
535                 copied += len;
536         }
537
538         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
539                 struct skb_frag_struct *frag =
540                         &skb_shinfo(skb)->frags[i];
541                 busaddr = pci_map_page(pdev, frag->page,
542                                        frag->page_offset,
543                                        frag->size, PCI_DMA_TODEVICE);
544                 if (pci_dma_mapping_error(pdev, busaddr))
545                         goto dma_err;
546                 wrb = queue_head_node(txq);
547                 wrb_fill(wrb, busaddr, frag->size);
548                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
549                 queue_head_inc(txq);
550                 copied += frag->size;
551         }
552
553         if (dummy_wrb) {
554                 wrb = queue_head_node(txq);
555                 wrb_fill(wrb, 0, 0);
556                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
557                 queue_head_inc(txq);
558         }
559
560         wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
561         be_dws_cpu_to_le(hdr, sizeof(*hdr));
562
563         return copied;
564 dma_err:
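        /* Note (added): on a mapping failure, rewind the queue head and
         * unmap whatever was mapped so far; only the first WRB (the skb
         * head, if present) was a single-buffer mapping, the rest were
         * page mappings.
         */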
565         txq->head = map_head;
566         while (copied) {
567                 wrb = queue_head_node(txq);
568                 unmap_tx_frag(pdev, wrb, map_single);
569                 map_single = false;
570                 copied -= wrb->frag_len;
571                 queue_head_inc(txq);
572         }
573         return 0;
574 }
575
576 static netdev_tx_t be_xmit(struct sk_buff *skb,
577                         struct net_device *netdev)
578 {
579         struct be_adapter *adapter = netdev_priv(netdev);
580         struct be_tx_obj *tx_obj = &adapter->tx_obj;
581         struct be_queue_info *txq = &tx_obj->q;
582         u32 wrb_cnt = 0, copied = 0;
583         u32 start = txq->head;
584         bool dummy_wrb, stopped = false;
585
586         wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
587
588         copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
589         if (copied) {
590                 /* record the sent skb in the sent_skb table */
591                 BUG_ON(tx_obj->sent_skb_list[start]);
592                 tx_obj->sent_skb_list[start] = skb;
593
594                 /* Ensure txq has space for the next skb; else stop the queue
595                  * *BEFORE* ringing the tx doorbell, so that we serialize the
596                  * tx compls of the current transmit, which will wake the queue
597                  */
598                 atomic_add(wrb_cnt, &txq->used);
599                 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
600                                                                 txq->len) {
601                         netif_stop_queue(netdev);
602                         stopped = true;
603                 }
604
605                 be_txq_notify(adapter, txq->id, wrb_cnt);
606
607                 be_tx_stats_update(adapter, wrb_cnt, copied,
608                                 skb_shinfo(skb)->gso_segs, stopped);
609         } else {
610                 txq->head = start;
611                 dev_kfree_skb_any(skb);
612         }
613         return NETDEV_TX_OK;
614 }
615
616 static int be_change_mtu(struct net_device *netdev, int new_mtu)
617 {
618         struct be_adapter *adapter = netdev_priv(netdev);
619         if (new_mtu < BE_MIN_MTU ||
620                         new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
621                                         (ETH_HLEN + ETH_FCS_LEN))) {
622                 dev_info(&adapter->pdev->dev,
623                         "MTU must be between %d and %d bytes\n",
624                         BE_MIN_MTU,
625                         (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
626                 return -EINVAL;
627         }
628         dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
629                         netdev->mtu, new_mtu);
630         netdev->mtu = new_mtu;
631         return 0;
632 }
633
634 /*
635  * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
636  * If the user configures more, place BE in vlan promiscuous mode.
637  */
638 static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
639 {
640         u16 vtag[BE_NUM_VLANS_SUPPORTED];
641         u16 ntags = 0, i;
642         int status = 0;
643         u32 if_handle;
644
645         if (vf) {
646                 if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
647                 vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
648                 status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
649         }
650
651         if (adapter->vlans_added <= adapter->max_vlans)  {
652                 /* Construct VLAN Table to give to HW */
653                 for (i = 0; i < VLAN_N_VID; i++) {
654                         if (adapter->vlan_tag[i]) {
655                                 vtag[ntags] = cpu_to_le16(i);
656                                 ntags++;
657                         }
658                 }
659                 status = be_cmd_vlan_config(adapter, adapter->if_handle,
660                                         vtag, ntags, 1, 0);
661         } else {
662                 status = be_cmd_vlan_config(adapter, adapter->if_handle,
663                                         NULL, 0, 1, 1);
664         }
665
666         return status;
667 }
668
669 static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
670 {
671         struct be_adapter *adapter = netdev_priv(netdev);
672
673         adapter->vlan_grp = grp;
674 }
675
676 static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
677 {
678         struct be_adapter *adapter = netdev_priv(netdev);
679
680         adapter->vlans_added++;
681         if (!be_physfn(adapter))
682                 return;
683
684         adapter->vlan_tag[vid] = 1;
685         if (adapter->vlans_added <= (adapter->max_vlans + 1))
686                 be_vid_config(adapter, false, 0);
687 }
688
689 static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
690 {
691         struct be_adapter *adapter = netdev_priv(netdev);
692
693         adapter->vlans_added--;
694         vlan_group_set_device(adapter->vlan_grp, vid, NULL);
695
696         if (!be_physfn(adapter))
697                 return;
698
699         adapter->vlan_tag[vid] = 0;
700         if (adapter->vlans_added <= adapter->max_vlans)
701                 be_vid_config(adapter, false, 0);
702 }
703
704 static void be_set_multicast_list(struct net_device *netdev)
705 {
706         struct be_adapter *adapter = netdev_priv(netdev);
707
708         if (netdev->flags & IFF_PROMISC) {
709                 be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
710                 adapter->promiscuous = true;
711                 goto done;
712         }
713
714         /* BE was previously in promiscuous mode; disable it */
715         if (adapter->promiscuous) {
716                 adapter->promiscuous = false;
717                 be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
718         }
719
720         /* Enable multicast promisc if num configured exceeds what we support */
721         if (netdev->flags & IFF_ALLMULTI ||
722             netdev_mc_count(netdev) > BE_MAX_MC) {
723                 be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
724                                 &adapter->mc_cmd_mem);
725                 goto done;
726         }
727
728         be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
729                 &adapter->mc_cmd_mem);
730 done:
731         return;
732 }
733
734 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
735 {
736         struct be_adapter *adapter = netdev_priv(netdev);
737         int status;
738
739         if (!adapter->sriov_enabled)
740                 return -EPERM;
741
742         if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
743                 return -EINVAL;
744
745         if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
746                 status = be_cmd_pmac_del(adapter,
747                                         adapter->vf_cfg[vf].vf_if_handle,
748                                         adapter->vf_cfg[vf].vf_pmac_id);
749
750         status = be_cmd_pmac_add(adapter, mac,
751                                 adapter->vf_cfg[vf].vf_if_handle,
752                                 &adapter->vf_cfg[vf].vf_pmac_id);
753
754         if (status)
755                 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
756                                 mac, vf);
757         else
758                 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
759
760         return status;
761 }
762
763 static int be_get_vf_config(struct net_device *netdev, int vf,
764                         struct ifla_vf_info *vi)
765 {
766         struct be_adapter *adapter = netdev_priv(netdev);
767
768         if (!adapter->sriov_enabled)
769                 return -EPERM;
770
771         if (vf >= num_vfs)
772                 return -EINVAL;
773
774         vi->vf = vf;
775         vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
776         vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
777         vi->qos = 0;
778         memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
779
780         return 0;
781 }
782
783 static int be_set_vf_vlan(struct net_device *netdev,
784                         int vf, u16 vlan, u8 qos)
785 {
786         struct be_adapter *adapter = netdev_priv(netdev);
787         int status = 0;
788
789         if (!adapter->sriov_enabled)
790                 return -EPERM;
791
792         if ((vf >= num_vfs) || (vlan > 4095))
793                 return -EINVAL;
794
795         if (vlan) {
796                 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
797                 adapter->vlans_added++;
798         } else {
799                 adapter->vf_cfg[vf].vf_vlan_tag = 0;
800                 adapter->vlans_added--;
801         }
802
803         status = be_vid_config(adapter, true, vf);
804
805         if (status)
806                 dev_info(&adapter->pdev->dev,
807                                 "VLAN %d config on VF %d failed\n", vlan, vf);
808         return status;
809 }
810
811 static int be_set_vf_tx_rate(struct net_device *netdev,
812                         int vf, int rate)
813 {
814         struct be_adapter *adapter = netdev_priv(netdev);
815         int status = 0;
816
817         if (!adapter->sriov_enabled)
818                 return -EPERM;
819
820         if ((vf >= num_vfs) || (rate < 0))
821                 return -EINVAL;
822
823         if (rate > 10000)
824                 rate = 10000;
825
826         adapter->vf_cfg[vf].vf_tx_rate = rate;
827         status = be_cmd_set_qos(adapter, rate / 10, vf);
828
829         if (status)
830                 dev_info(&adapter->pdev->dev,
831                                 "tx rate %d on VF %d failed\n", rate, vf);
832         return status;
833 }
834
835 static void be_rx_rate_update(struct be_rx_obj *rxo)
836 {
837         struct be_rx_stats *stats = &rxo->stats;
838         ulong now = jiffies;
839
840         /* Wrapped around */
841         if (time_before(now, stats->rx_jiffies)) {
842                 stats->rx_jiffies = now;
843                 return;
844         }
845
846         /* Update the rate once in two seconds */
847         if ((now - stats->rx_jiffies) < 2 * HZ)
848                 return;
849
850         stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
851                                 now - stats->rx_jiffies);
852         stats->rx_jiffies = now;
853         stats->rx_bytes_prev = stats->rx_bytes;
854 }
855
856 static void be_rx_stats_update(struct be_rx_obj *rxo,
857                 u32 pktsize, u16 numfrags, u8 pkt_type)
858 {
859         struct be_rx_stats *stats = &rxo->stats;
860
861         stats->rx_compl++;
862         stats->rx_frags += numfrags;
863         stats->rx_bytes += pktsize;
864         stats->rx_pkts++;
865         if (pkt_type == BE_MULTICAST_PACKET)
866                 stats->rx_mcast_pkts++;
867 }
868
869 static inline bool csum_passed(struct be_eth_rx_compl *rxcp)
870 {
871         u8 l4_cksm, ipv6, ipcksm;
872
873         l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
874         ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
875         ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
876
877         /* Ignore ipcksm for ipv6 pkts */
878         return l4_cksm && (ipcksm || ipv6);
879 }
880
881 static struct be_rx_page_info *
882 get_rx_page_info(struct be_adapter *adapter,
883                 struct be_rx_obj *rxo,
884                 u16 frag_idx)
885 {
886         struct be_rx_page_info *rx_page_info;
887         struct be_queue_info *rxq = &rxo->q;
888
889         rx_page_info = &rxo->page_info_tbl[frag_idx];
890         BUG_ON(!rx_page_info->page);
891
892         if (rx_page_info->last_page_user) {
893                 pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
894                         adapter->big_page_size, PCI_DMA_FROMDEVICE);
895                 rx_page_info->last_page_user = false;
896         }
897
898         atomic_dec(&rxq->used);
899         return rx_page_info;
900 }
901
902 /* Throw away the data in the Rx completion */
903 static void be_rx_compl_discard(struct be_adapter *adapter,
904                 struct be_rx_obj *rxo,
905                 struct be_eth_rx_compl *rxcp)
906 {
907         struct be_queue_info *rxq = &rxo->q;
908         struct be_rx_page_info *page_info;
909         u16 rxq_idx, i, num_rcvd;
910
911         rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
912         num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
913
914         for (i = 0; i < num_rcvd; i++) {
915                 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
916                 put_page(page_info->page);
917                 memset(page_info, 0, sizeof(*page_info));
918                 index_inc(&rxq_idx, rxq->len);
919         }
920 }
921
922 /*
923  * skb_fill_rx_data forms a complete skb for an ether frame
924  * indicated by rxcp.
925  */
926 static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
927                         struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
928                         u16 num_rcvd)
929 {
930         struct be_queue_info *rxq = &rxo->q;
931         struct be_rx_page_info *page_info;
932         u16 rxq_idx, i, j;
933         u32 pktsize, hdr_len, curr_frag_len, size;
934         u8 *start;
935         u8 pkt_type;
936
937         rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
938         pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
939         pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
940
941         page_info = get_rx_page_info(adapter, rxo, rxq_idx);
942
943         start = page_address(page_info->page) + page_info->page_offset;
944         prefetch(start);
945
946         /* Copy data in the first descriptor of this completion */
947         curr_frag_len = min(pktsize, rx_frag_size);
948
949         /* Copy the header portion into skb_data */
950         hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
951         memcpy(skb->data, start, hdr_len);
952         skb->len = curr_frag_len;
953         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
954                 /* Complete packet has now been moved to data */
955                 put_page(page_info->page);
956                 skb->data_len = 0;
957                 skb->tail += curr_frag_len;
958         } else {
959                 skb_shinfo(skb)->nr_frags = 1;
960                 skb_shinfo(skb)->frags[0].page = page_info->page;
961                 skb_shinfo(skb)->frags[0].page_offset =
962                                         page_info->page_offset + hdr_len;
963                 skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
964                 skb->data_len = curr_frag_len - hdr_len;
965                 skb->tail += hdr_len;
966         }
967         page_info->page = NULL;
968
969         if (pktsize <= rx_frag_size) {
970                 BUG_ON(num_rcvd != 1);
971                 goto done;
972         }
973
974         /* More frags present for this completion */
975         size = pktsize;
976         for (i = 1, j = 0; i < num_rcvd; i++) {
977                 size -= curr_frag_len;
978                 index_inc(&rxq_idx, rxq->len);
979                 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
980
981                 curr_frag_len = min(size, rx_frag_size);
982
983                 /* Coalesce all frags from the same physical page in one slot */
984                 if (page_info->page_offset == 0) {
985                         /* Fresh page */
986                         j++;
987                         skb_shinfo(skb)->frags[j].page = page_info->page;
988                         skb_shinfo(skb)->frags[j].page_offset =
989                                                         page_info->page_offset;
990                         skb_shinfo(skb)->frags[j].size = 0;
991                         skb_shinfo(skb)->nr_frags++;
992                 } else {
993                         put_page(page_info->page);
994                 }
995
996                 skb_shinfo(skb)->frags[j].size += curr_frag_len;
997                 skb->len += curr_frag_len;
998                 skb->data_len += curr_frag_len;
999
1000                 page_info->page = NULL;
1001         }
1002         BUG_ON(j >= MAX_SKB_FRAGS);
1003
1004 done:
1005         be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type);
1006 }
1007
1008 /* Process the RX completion indicated by rxcp when GRO is disabled */
1009 static void be_rx_compl_process(struct be_adapter *adapter,
1010                         struct be_rx_obj *rxo,
1011                         struct be_eth_rx_compl *rxcp)
1012 {
1013         struct sk_buff *skb;
1014         u32 vlanf, vid;
1015         u16 num_rcvd;
1016         u8 vtm;
1017
1018         num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
1019         /* Is it a flush compl that has no data */
1020         if (unlikely(num_rcvd == 0))
1021                 return;
1022
1023         skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
1024         if (unlikely(!skb)) {
1025                 if (net_ratelimit())
1026                         dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
1027                 be_rx_compl_discard(adapter, rxo, rxcp);
1028                 return;
1029         }
1030
1031         skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);
1032
1033         if (likely(adapter->rx_csum && csum_passed(rxcp)))
1034                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1035         else
1036                 skb_checksum_none_assert(skb);
1037
1038         skb->truesize = skb->len + sizeof(struct sk_buff);
1039         skb->protocol = eth_type_trans(skb, adapter->netdev);
1040
1041         vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
1042         vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
1043
1044         /* vlanf could be wrongly set in some cards.
1045          * ignore if vtm is not set */
1046         if ((adapter->function_mode & 0x400) && !vtm)
1047                 vlanf = 0;
1048
1049         if (unlikely(vlanf)) {
1050                 if (!adapter->vlan_grp || adapter->vlans_added == 0) {
1051                         kfree_skb(skb);
1052                         return;
1053                 }
1054                 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
1055                 if (!lancer_chip(adapter))
1056                         vid = swab16(vid);
1057                 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
1058         } else {
1059                 netif_receive_skb(skb);
1060         }
1061 }
1062
1063 /* Process the RX completion indicated by rxcp when GRO is enabled */
1064 static void be_rx_compl_process_gro(struct be_adapter *adapter,
1065                 struct be_rx_obj *rxo,
1066                 struct be_eth_rx_compl *rxcp)
1067 {
1068         struct be_rx_page_info *page_info;
1069         struct sk_buff *skb = NULL;
1070         struct be_queue_info *rxq = &rxo->q;
1071         struct be_eq_obj *eq_obj =  &rxo->rx_eq;
1072         u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
1073         u16 i, rxq_idx = 0, vid, j;
1074         u8 vtm;
1075         u8 pkt_type;
1076
1077         num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
1078         /* Is it a flush compl that has no data */
1079         if (unlikely(num_rcvd == 0))
1080                 return;
1081
1082         pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
1083         vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
1084         rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
1085         vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
1086         pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
1087
1088         /* vlanf could be wrongly set in some cards.
1089          * ignore if vtm is not set */
1090         if ((adapter->function_mode & 0x400) && !vtm)
1091                 vlanf = 0;
1092
1093         skb = napi_get_frags(&eq_obj->napi);
1094         if (!skb) {
1095                 be_rx_compl_discard(adapter, rxo, rxcp);
1096                 return;
1097         }
1098
1099         remaining = pkt_size;
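        /* Note (added): j is a u16 initialized to -1 (i.e. 0xffff); the
         * "fresh page" branch below increments it, wrapping to 0, before
         * it is first used as a frag index.
         */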
1100         for (i = 0, j = -1; i < num_rcvd; i++) {
1101                 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
1102
1103                 curr_frag_len = min(remaining, rx_frag_size);
1104
1105                 /* Coalesce all frags from the same physical page in one slot */
1106                 if (i == 0 || page_info->page_offset == 0) {
1107                         /* First frag or Fresh page */
1108                         j++;
1109                         skb_shinfo(skb)->frags[j].page = page_info->page;
1110                         skb_shinfo(skb)->frags[j].page_offset =
1111                                                         page_info->page_offset;
1112                         skb_shinfo(skb)->frags[j].size = 0;
1113                 } else {
1114                         put_page(page_info->page);
1115                 }
1116                 skb_shinfo(skb)->frags[j].size += curr_frag_len;
1117
1118                 remaining -= curr_frag_len;
1119                 index_inc(&rxq_idx, rxq->len);
1120                 memset(page_info, 0, sizeof(*page_info));
1121         }
1122         BUG_ON(j >= MAX_SKB_FRAGS);
1123
1124         skb_shinfo(skb)->nr_frags = j + 1;
1125         skb->len = pkt_size;
1126         skb->data_len = pkt_size;
1127         skb->truesize += pkt_size;
1128         skb->ip_summed = CHECKSUM_UNNECESSARY;
1129
1130         if (likely(!vlanf)) {
1131                 napi_gro_frags(&eq_obj->napi);
1132         } else {
1133                 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
1134                 if (!lancer_chip(adapter))
1135                         vid = swab16(vid);
1136
1137                 if (!adapter->vlan_grp || adapter->vlans_added == 0)
1138                         return;
1139
1140                 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
1141         }
1142
1143         be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type);
1144 }
1145
1146 static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo)
1147 {
1148         struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq);
1149
1150         if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
1151                 return NULL;
1152
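        /* Note (added): the device fills the entry before setting the
         * 'valid' word; rmb() keeps the reads below from being reordered
         * ahead of the valid check above.
         */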
1153         rmb();
1154         be_dws_le_to_cpu(rxcp, sizeof(*rxcp));
1155
1156         queue_tail_inc(&rxo->cq);
1157         return rxcp;
1158 }
1159
1160 /* To reset the valid bit, we need to reset the whole word as
1161  * when walking the queue the valid entries are little-endian
1162  * and invalid entries are host endian
1163  */
1164 static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
1165 {
1166         rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
1167 }
1168
1169 static inline struct page *be_alloc_pages(u32 size)
1170 {
1171         gfp_t alloc_flags = GFP_ATOMIC;
1172         u32 order = get_order(size);
1173         if (order > 0)
1174                 alloc_flags |= __GFP_COMP;
1175         return  alloc_pages(alloc_flags, order);
1176 }
1177
1178 /*
1179  * Allocate a page, split it to fragments of size rx_frag_size and post as
1180  * receive buffers to BE
1181  */
1182 static void be_post_rx_frags(struct be_rx_obj *rxo)
1183 {
1184         struct be_adapter *adapter = rxo->adapter;
1185         struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1186         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1187         struct be_queue_info *rxq = &rxo->q;
1188         struct page *pagep = NULL;
1189         struct be_eth_rx_d *rxd;
1190         u64 page_dmaaddr = 0, frag_dmaaddr;
1191         u32 posted, page_offset = 0;
1192
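        /* Note (added): each compound page is carved into rx_frag_size
         * chunks, one per rx descriptor. get_page() takes an extra
         * reference per chunk, and last_page_user marks the frag whose
         * recycling also tears down the DMA mapping of the whole page.
         */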
1193         page_info = &rxo->page_info_tbl[rxq->head];
1194         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1195                 if (!pagep) {
1196                         pagep = be_alloc_pages(adapter->big_page_size);
1197                         if (unlikely(!pagep)) {
1198                                 rxo->stats.rx_post_fail++;
1199                                 break;
1200                         }
1201                         page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
1202                                                 adapter->big_page_size,
1203                                                 PCI_DMA_FROMDEVICE);
1204                         page_info->page_offset = 0;
1205                 } else {
1206                         get_page(pagep);
1207                         page_info->page_offset = page_offset + rx_frag_size;
1208                 }
1209                 page_offset = page_info->page_offset;
1210                 page_info->page = pagep;
1211                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1212                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1213
1214                 rxd = queue_head_node(rxq);
1215                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1216                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1217
1218                 /* Any space left in the current big page for another frag? */
1219                 if ((page_offset + rx_frag_size + rx_frag_size) >
1220                                         adapter->big_page_size) {
1221                         pagep = NULL;
1222                         page_info->last_page_user = true;
1223                 }
1224
1225                 prev_page_info = page_info;
1226                 queue_head_inc(rxq);
1227                 page_info = &page_info_tbl[rxq->head];
1228         }
1229         if (pagep)
1230                 prev_page_info->last_page_user = true;
1231
1232         if (posted) {
1233                 atomic_add(posted, &rxq->used);
1234                 be_rxq_notify(adapter, rxq->id, posted);
1235         } else if (atomic_read(&rxq->used) == 0) {
1236                 /* Let be_worker replenish when memory is available */
1237                 rxo->rx_post_starved = true;
1238         }
1239 }
1240
1241 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1242 {
1243         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1244
1245         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1246                 return NULL;
1247
1248         rmb();
1249         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1250
1251         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1252
1253         queue_tail_inc(tx_cq);
1254         return txcp;
1255 }
1256
1257 static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
1258 {
1259         struct be_queue_info *txq = &adapter->tx_obj.q;
1260         struct be_eth_wrb *wrb;
1261         struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1262         struct sk_buff *sent_skb;
1263         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1264         bool unmap_skb_hdr = true;
1265
1266         sent_skb = sent_skbs[txq->tail];
1267         BUG_ON(!sent_skb);
1268         sent_skbs[txq->tail] = NULL;
1269
1270         /* skip header wrb */
1271         queue_tail_inc(txq);
1272
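        /* Note (added): walk the data WRBs up to the completion's
         * wrb_index, unmapping each fragment; only the first data WRB
         * (the skb head, if any) was a single-buffer mapping.
         */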
1273         do {
1274                 cur_index = txq->tail;
1275                 wrb = queue_tail_node(txq);
1276                 unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
1277                                         skb_headlen(sent_skb)));
1278                 unmap_skb_hdr = false;
1279
1280                 num_wrbs++;
1281                 queue_tail_inc(txq);
1282         } while (cur_index != last_index);
1283
1284         atomic_sub(num_wrbs, &txq->used);
1285
1286         kfree_skb(sent_skb);
1287 }
1288
1289 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1290 {
1291         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1292
1293         if (!eqe->evt)
1294                 return NULL;
1295
1296         rmb();
1297         eqe->evt = le32_to_cpu(eqe->evt);
1298         queue_tail_inc(&eq_obj->q);
1299         return eqe;
1300 }
1301
1302 static int event_handle(struct be_adapter *adapter,
1303                         struct be_eq_obj *eq_obj)
1304 {
1305         struct be_eq_entry *eqe;
1306         u16 num = 0;
1307
1308         while ((eqe = event_get(eq_obj)) != NULL) {
1309                 eqe->evt = 0;
1310                 num++;
1311         }
1312
1313         /* Deal with any spurious interrupts that come
1314          * without events
1315          */
1316         be_eq_notify(adapter, eq_obj->q.id, true, true, num);
1317         if (num)
1318                 napi_schedule(&eq_obj->napi);
1319
1320         return num;
1321 }
1322
1323 /* Just read and notify events without processing them.
1324  * Used at the time of destroying event queues */
1325 static void be_eq_clean(struct be_adapter *adapter,
1326                         struct be_eq_obj *eq_obj)
1327 {
1328         struct be_eq_entry *eqe;
1329         u16 num = 0;
1330
1331         while ((eqe = event_get(eq_obj)) != NULL) {
1332                 eqe->evt = 0;
1333                 num++;
1334         }
1335
1336         if (num)
1337                 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1338 }
1339
1340 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1341 {
1342         struct be_rx_page_info *page_info;
1343         struct be_queue_info *rxq = &rxo->q;
1344         struct be_queue_info *rx_cq = &rxo->cq;
1345         struct be_eth_rx_compl *rxcp;
1346         u16 tail;
1347
1348         /* First cleanup pending rx completions */
1349         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1350                 be_rx_compl_discard(adapter, rxo, rxcp);
1351                 be_rx_compl_reset(rxcp);
1352                 be_cq_notify(adapter, rx_cq->id, true, 1);
1353         }
1354
1355         /* Then free posted rx buffers that were not used */
1356         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1357         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1358                 page_info = get_rx_page_info(adapter, rxo, tail);
1359                 put_page(page_info->page);
1360                 memset(page_info, 0, sizeof(*page_info));
1361         }
1362         BUG_ON(atomic_read(&rxq->used));
1363 }
1364
1365 static void be_tx_compl_clean(struct be_adapter *adapter)
1366 {
1367         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1368         struct be_queue_info *txq = &adapter->tx_obj.q;
1369         struct be_eth_tx_compl *txcp;
1370         u16 end_idx, cmpl = 0, timeo = 0;
1371         struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1372         struct sk_buff *sent_skb;
1373         bool dummy_wrb;
1374
1375         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1376         do {
1377                 while ((txcp = be_tx_compl_get(tx_cq))) {
1378                         end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1379                                         wrb_index, txcp);
1380                         be_tx_compl_process(adapter, end_idx);
1381                         cmpl++;
1382                 }
1383                 if (cmpl) {
1384                         be_cq_notify(adapter, tx_cq->id, false, cmpl);
1385                         cmpl = 0;
1386                 }
1387
1388                 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1389                         break;
1390
1391                 mdelay(1);
1392         } while (true);
1393
1394         if (atomic_read(&txq->used))
1395                 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1396                         atomic_read(&txq->used));
1397
1398         /* free posted tx for which compls will never arrive */
1399         while (atomic_read(&txq->used)) {
1400                 sent_skb = sent_skbs[txq->tail];
1401                 end_idx = txq->tail;
1402                 index_adv(&end_idx,
1403                         wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1404                         txq->len);
1405                 be_tx_compl_process(adapter, end_idx);
1406         }
1407 }
1408
1409 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1410 {
1411         struct be_queue_info *q;
1412
1413         q = &adapter->mcc_obj.q;
1414         if (q->created)
1415                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1416         be_queue_free(adapter, q);
1417
1418         q = &adapter->mcc_obj.cq;
1419         if (q->created)
1420                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1421         be_queue_free(adapter, q);
1422 }
1423
1424 /* Must be called only after TX qs are created as MCC shares TX EQ */
1425 static int be_mcc_queues_create(struct be_adapter *adapter)
1426 {
1427         struct be_queue_info *q, *cq;
1428
1429         /* Alloc MCC compl queue */
1430         cq = &adapter->mcc_obj.cq;
1431         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1432                         sizeof(struct be_mcc_compl)))
1433                 goto err;
1434
1435         /* Ask BE to create MCC compl queue; share TX's eq */
1436         if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1437                 goto mcc_cq_free;
1438
1439         /* Alloc MCC queue */
1440         q = &adapter->mcc_obj.q;
1441         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1442                 goto mcc_cq_destroy;
1443
1444         /* Ask BE to create MCC queue */
1445         if (be_cmd_mccq_create(adapter, q, cq))
1446                 goto mcc_q_free;
1447
1448         return 0;
1449
1450 mcc_q_free:
1451         be_queue_free(adapter, q);
1452 mcc_cq_destroy:
1453         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1454 mcc_cq_free:
1455         be_queue_free(adapter, cq);
1456 err:
1457         return -1;
1458 }
1459
1460 static void be_tx_queues_destroy(struct be_adapter *adapter)
1461 {
1462         struct be_queue_info *q;
1463
1464         q = &adapter->tx_obj.q;
1465         if (q->created)
1466                 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1467         be_queue_free(adapter, q);
1468
1469         q = &adapter->tx_obj.cq;
1470         if (q->created)
1471                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1472         be_queue_free(adapter, q);
1473
1474         /* Clear any residual events */
1475         be_eq_clean(adapter, &adapter->tx_eq);
1476
1477         q = &adapter->tx_eq.q;
1478         if (q->created)
1479                 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1480         be_queue_free(adapter, q);
1481 }
1482
1483 static int be_tx_queues_create(struct be_adapter *adapter)
1484 {
1485         struct be_queue_info *eq, *q, *cq;
1486
1487         adapter->tx_eq.max_eqd = 0;
1488         adapter->tx_eq.min_eqd = 0;
1489         adapter->tx_eq.cur_eqd = 96;
1490         adapter->tx_eq.enable_aic = false;
1491         /* Alloc Tx Event queue */
1492         eq = &adapter->tx_eq.q;
1493         if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
1494                 return -1;
1495
1496         /* Ask BE to create Tx Event queue */
1497         if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1498                 goto tx_eq_free;
1499
1500         adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
1501
1503         /* Alloc TX eth compl queue */
1504         cq = &adapter->tx_obj.cq;
1505         if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1506                         sizeof(struct be_eth_tx_compl)))
1507                 goto tx_eq_destroy;
1508
1509         /* Ask BE to create Tx eth compl queue */
1510         if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1511                 goto tx_cq_free;
1512
1513         /* Alloc TX eth queue */
1514         q = &adapter->tx_obj.q;
1515         if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
1516                 goto tx_cq_destroy;
1517
1518         /* Ask BE to create Tx eth queue */
1519         if (be_cmd_txq_create(adapter, q, cq))
1520                 goto tx_q_free;
1521         return 0;
1522
1523 tx_q_free:
1524         be_queue_free(adapter, q);
1525 tx_cq_destroy:
1526         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1527 tx_cq_free:
1528         be_queue_free(adapter, cq);
1529 tx_eq_destroy:
1530         be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
1531 tx_eq_free:
1532         be_queue_free(adapter, eq);
1533         return -1;
1534 }
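
/* Usage sketch (not driver code): since the MCC queues ride on the TX EQ
 * (see the note above be_mcc_queues_create()), bring-up must create the TX
 * queues first and tear them down last -- the ordering be_setup() and
 * be_clear() below follow:
 */
#if 0
static int example_queue_bringup(struct be_adapter *adapter)
{
        int status;

        status = be_tx_queues_create(adapter); /* creates the shared TX EQ */
        if (status)
                return status;

        status = be_mcc_queues_create(adapter); /* needs the TX EQ to exist */
        if (status)
                be_tx_queues_destroy(adapter);
        return status;
}
#endif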
1535
1536 static void be_rx_queues_destroy(struct be_adapter *adapter)
1537 {
1538         struct be_queue_info *q;
1539         struct be_rx_obj *rxo;
1540         int i;
1541
1542         for_all_rx_queues(adapter, rxo, i) {
1543                 q = &rxo->q;
1544                 if (q->created) {
1545                         be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1546                         /* After the rxq is invalidated, wait for a grace time
1547                          * of 1ms for all dma to end and the flush compl to
1548                          * arrive
1549                          */
1550                         mdelay(1);
1551                         be_rx_q_clean(adapter, rxo);
1552                 }
1553                 be_queue_free(adapter, q);
1554
1555                 q = &rxo->cq;
1556                 if (q->created)
1557                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1558                 be_queue_free(adapter, q);
1559
1560                 /* Clear any residual events */
1561                 q = &rxo->rx_eq.q;
1562                 if (q->created) {
1563                         be_eq_clean(adapter, &rxo->rx_eq);
1564                         be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1565                 }
1566                 be_queue_free(adapter, q);
1567         }
1568 }
1569
1570 static int be_rx_queues_create(struct be_adapter *adapter)
1571 {
1572         struct be_queue_info *eq, *q, *cq;
1573         struct be_rx_obj *rxo;
1574         int rc, i;
1575
1576         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1577         for_all_rx_queues(adapter, rxo, i) {
1578                 rxo->adapter = adapter;
1579                 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1580                 rxo->rx_eq.enable_aic = true;
1581
1582                 /* EQ */
1583                 eq = &rxo->rx_eq.q;
1584                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1585                                         sizeof(struct be_eq_entry));
1586                 if (rc)
1587                         goto err;
1588
1589                 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1590                 if (rc)
1591                         goto err;
1592
1593                 rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
1594
1595                 /* CQ */
1596                 cq = &rxo->cq;
1597                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1598                                 sizeof(struct be_eth_rx_compl));
1599                 if (rc)
1600                         goto err;
1601
1602                 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1603                 if (rc)
1604                         goto err;
1605                 /* Rx Q */
1606                 q = &rxo->q;
1607                 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1608                                 sizeof(struct be_eth_rx_d));
1609                 if (rc)
1610                         goto err;
1611
1612                 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1613                         BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1614                         (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
1615                 if (rc)
1616                         goto err;
1617         }
1618
1619         if (be_multi_rxq(adapter)) {
1620                 u8 rsstable[MAX_RSS_QS];
1621
1622                 for_all_rss_queues(adapter, rxo, i)
1623                         rsstable[i] = rxo->rss_id;
1624
1625                 rc = be_cmd_rss_config(adapter, rsstable,
1626                         adapter->num_rx_qs - 1);
1627                 if (rc)
1628                         goto err;
1629         }
1630
1631         return 0;
1632 err:
1633         be_rx_queues_destroy(adapter);
1634         return -1;
1635 }
1636
1637 static bool event_peek(struct be_eq_obj *eq_obj)
1638 {
1639         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1640         if (!eqe->evt)
1641                 return false;
1642         else
1643                 return true;
1644 }
1645
1646 static irqreturn_t be_intx(int irq, void *dev)
1647 {
1648         struct be_adapter *adapter = dev;
1649         struct be_rx_obj *rxo;
1650         int isr, i, tx = 0, rx = 0;
1651
1652         if (lancer_chip(adapter)) {
1653                 if (event_peek(&adapter->tx_eq))
1654                         tx = event_handle(adapter, &adapter->tx_eq);
1655                 for_all_rx_queues(adapter, rxo, i) {
1656                         if (event_peek(&rxo->rx_eq))
1657                                 rx |= event_handle(adapter, &rxo->rx_eq);
1658                 }
1659
1660                 if (!(tx || rx))
1661                         return IRQ_NONE;
1662
1663         } else {
1664                 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1665                         (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1666                 if (!isr)
1667                         return IRQ_NONE;
1668
1669                 if ((1 << adapter->tx_eq.msix_vec_idx & isr))
1670                         event_handle(adapter, &adapter->tx_eq);
1671
1672                 for_all_rx_queues(adapter, rxo, i) {
1673                         if ((1 << rxo->rx_eq.msix_vec_idx & isr))
1674                                 event_handle(adapter, &rxo->rx_eq);
1675                 }
1676         }
1677
1678         return IRQ_HANDLED;
1679 }
1680
1681 static irqreturn_t be_msix_rx(int irq, void *dev)
1682 {
1683         struct be_rx_obj *rxo = dev;
1684         struct be_adapter *adapter = rxo->adapter;
1685
1686         event_handle(adapter, &rxo->rx_eq);
1687
1688         return IRQ_HANDLED;
1689 }
1690
1691 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1692 {
1693         struct be_adapter *adapter = dev;
1694
1695         event_handle(adapter, &adapter->tx_eq);
1696
1697         return IRQ_HANDLED;
1698 }
1699
1700 static inline bool do_gro(struct be_adapter *adapter, struct be_rx_obj *rxo,
1701                         struct be_eth_rx_compl *rxcp)
1702 {
1703         int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
1704         int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
1705
1706         if (err)
1707                 rxo->stats.rxcp_err++;
1708
1709         return tcp_frame && !err;
1710 }
1711
1712 static int be_poll_rx(struct napi_struct *napi, int budget)
1713 {
1714         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1715         struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1716         struct be_adapter *adapter = rxo->adapter;
1717         struct be_queue_info *rx_cq = &rxo->cq;
1718         struct be_eth_rx_compl *rxcp;
1719         u32 work_done;
1720
1721         rxo->stats.rx_polls++;
1722         for (work_done = 0; work_done < budget; work_done++) {
1723                 rxcp = be_rx_compl_get(rxo);
1724                 if (!rxcp)
1725                         break;
1726
1727                 if (do_gro(adapter, rxo, rxcp))
1728                         be_rx_compl_process_gro(adapter, rxo, rxcp);
1729                 else
1730                         be_rx_compl_process(adapter, rxo, rxcp);
1731
1732                 be_rx_compl_reset(rxcp);
1733         }
1734
1735         /* Refill the queue */
1736         if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1737                 be_post_rx_frags(rxo);
1738
1739         /* All consumed */
1740         if (work_done < budget) {
1741                 napi_complete(napi);
1742                 be_cq_notify(adapter, rx_cq->id, true, work_done);
1743         } else {
1744                 /* More to be consumed; continue with interrupts disabled */
1745                 be_cq_notify(adapter, rx_cq->id, false, work_done);
1746         }
1747         return work_done;
1748 }
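
/* be_poll_rx() above follows the standard NAPI contract: if fewer than
 * budget completions were consumed, call napi_complete() and re-arm the
 * completion-queue interrupt; otherwise leave interrupts off and let the
 * NAPI core poll again.  A minimal sketch of that contract (the device_*()
 * helpers are hypothetical):
 */
#if 0
static int example_napi_poll(struct napi_struct *napi, int budget)
{
        int work_done = 0;

        while (work_done < budget && device_has_completion())
                work_done += device_process_one_completion();

        if (work_done < budget) {
                napi_complete(napi);            /* all pending work drained */
                device_rearm_interrupt();       /* safe to take irqs again */
        }
        return work_done;
}
#endif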
1749
1750 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1751  * For TX/MCC we don't honour budget; consume everything
1752  */
1753 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1754 {
1755         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1756         struct be_adapter *adapter =
1757                 container_of(tx_eq, struct be_adapter, tx_eq);
1758         struct be_queue_info *txq = &adapter->tx_obj.q;
1759         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1760         struct be_eth_tx_compl *txcp;
1761         int tx_compl = 0, mcc_compl, status = 0;
1762         u16 end_idx;
1763
1764         while ((txcp = be_tx_compl_get(tx_cq))) {
1765                 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1766                                 wrb_index, txcp);
1767                 be_tx_compl_process(adapter, end_idx);
1768                 tx_compl++;
1769         }
1770
1771         mcc_compl = be_process_mcc(adapter, &status);
1772
1773         napi_complete(napi);
1774
1775         if (mcc_compl) {
1776                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1777                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1778         }
1779
1780         if (tx_compl) {
1781                 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
1782
1783                 /* As Tx wrbs have been freed up, wake up netdev queue if
1784                  * it was stopped due to lack of tx wrbs.
1785                  */
1786                 if (netif_queue_stopped(adapter->netdev) &&
1787                         atomic_read(&txq->used) < txq->len / 2) {
1788                         netif_wake_queue(adapter->netdev);
1789                 }
1790
1791                 tx_stats(adapter)->be_tx_events++;
1792                 tx_stats(adapter)->be_tx_compl += tx_compl;
1793         }
1794
1795         return 1;
1796 }
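
/* Note on the unconditional "return 1" above: TX and MCC completions are
 * always drained fully, so any return value below the NAPI budget after
 * napi_complete() tells the NAPI core that this poll is finished.
 */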
1797
1798 void be_detect_dump_ue(struct be_adapter *adapter)
1799 {
1800         u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1801         u32 i;
1802
1803         pci_read_config_dword(adapter->pdev,
1804                                 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1805         pci_read_config_dword(adapter->pdev,
1806                                 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1807         pci_read_config_dword(adapter->pdev,
1808                                 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1809         pci_read_config_dword(adapter->pdev,
1810                                 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1811
1812         ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1813         ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1814
1815         if (ue_status_lo || ue_status_hi) {
1816                 adapter->ue_detected = true;
1817                 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1818         }
1819
1820         if (ue_status_lo) {
1821                 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1822                         if (ue_status_lo & 1)
1823                                 dev_err(&adapter->pdev->dev,
1824                                 "UE: %s bit set\n", ue_status_low_desc[i]);
1825                 }
1826         }
1827         if (ue_status_hi) {
1828                 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1829                         if (ue_status_hi & 1)
1830                                 dev_err(&adapter->pdev->dev,
1831                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
1832                 }
1833         }
1834
1835 }
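
/* Both decode loops above are the same idiom: walk the status word bit by
 * bit and print the matching entry from the description table.  Factored
 * into a helper it would look like this (sketch only; the driver keeps the
 * loops open-coded):
 */
#if 0
static void be_dump_ue_bits(struct be_adapter *adapter, u32 status,
                        char **desc)
{
        u32 i;

        for (i = 0; status; status >>= 1, i++)
                if (status & 1)
                        dev_err(&adapter->pdev->dev,
                                "UE: %s bit set\n", desc[i]);
}
#endif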
1836
1837 static void be_worker(struct work_struct *work)
1838 {
1839         struct be_adapter *adapter =
1840                 container_of(work, struct be_adapter, work.work);
1841         struct be_rx_obj *rxo;
1842         int i;
1843
1844         /* when interrupts are not yet enabled, just reap any pending
1845          * mcc completions */
1846         if (!netif_running(adapter->netdev)) {
1847                 int mcc_compl, status = 0;
1848
1849                 mcc_compl = be_process_mcc(adapter, &status);
1850
1851                 if (mcc_compl) {
1852                         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1853                         be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1854                 }
1855                 goto reschedule;
1856         }
1857
1858         if (!adapter->stats_ioctl_sent)
1859                 be_cmd_get_stats(adapter, &adapter->stats_cmd);
1860
1861         be_tx_rate_update(adapter);
1862
1863         for_all_rx_queues(adapter, rxo, i) {
1864                 be_rx_rate_update(rxo);
1865                 be_rx_eqd_update(adapter, rxo);
1866
1867                 if (rxo->rx_post_starved) {
1868                         rxo->rx_post_starved = false;
1869                         be_post_rx_frags(rxo);
1870                 }
1871         }
1872         if (!adapter->ue_detected && !lancer_chip(adapter))
1873                 be_detect_dump_ue(adapter);
1874
1875 reschedule:
1876         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1877 }
1878
1879 static void be_msix_disable(struct be_adapter *adapter)
1880 {
1881         if (adapter->msix_enabled) {
1882                 pci_disable_msix(adapter->pdev);
1883                 adapter->msix_enabled = false;
1884         }
1885 }
1886
1887 static int be_num_rxqs_get(struct be_adapter *adapter)
1888 {
1889         if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1890                 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1891                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1892         } else {
1893                 dev_warn(&adapter->pdev->dev,
1894                         "No support for multiple RX queues\n");
1895                 return 1;
1896         }
1897 }
1898
1899 static void be_msix_enable(struct be_adapter *adapter)
1900 {
1901 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
1902         int i, status;
1903
1904         adapter->num_rx_qs = be_num_rxqs_get(adapter);
1905
1906         for (i = 0; i < (adapter->num_rx_qs + 1); i++)
1907                 adapter->msix_entries[i].entry = i;
1908
1909         status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1910                         adapter->num_rx_qs + 1);
1911         if (status == 0) {
1912                 goto done;
1913         } else if (status >= BE_MIN_MSIX_VECTORS) {
1914                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
1915                                 status) == 0) {
1916                         adapter->num_rx_qs = status - 1;
1917                         dev_warn(&adapter->pdev->dev,
1918                                 "Could alloc only %d MSIx vectors. "
1919                                 "Using %d RX Qs\n", status, adapter->num_rx_qs);
1920                         goto done;
1921                 }
1922         }
1923         return;
1924 done:
1925         adapter->msix_enabled = true;
1926 }
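
/* be_msix_enable() relies on the old pci_enable_msix() contract: a return
 * of 0 means all requested vectors were allocated, a negative value is an
 * error, and a positive value means only that many vectors are available;
 * the driver then retries with the smaller count and shrinks num_rx_qs to
 * match.
 */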
1927
1928 static void be_sriov_enable(struct be_adapter *adapter)
1929 {
1930         be_check_sriov_fn_type(adapter);
1931 #ifdef CONFIG_PCI_IOV
1932         if (be_physfn(adapter) && num_vfs) {
1933                 int status;
1934
1935                 status = pci_enable_sriov(adapter->pdev, num_vfs);
1936                 adapter->sriov_enabled = status ? false : true;
1937         }
1938 #endif
1939 }
1940
1941 static void be_sriov_disable(struct be_adapter *adapter)
1942 {
1943 #ifdef CONFIG_PCI_IOV
1944         if (adapter->sriov_enabled) {
1945                 pci_disable_sriov(adapter->pdev);
1946                 adapter->sriov_enabled = false;
1947         }
1948 #endif
1949 }
1950
1951 static inline int be_msix_vec_get(struct be_adapter *adapter,
1952                                         struct be_eq_obj *eq_obj)
1953 {
1954         return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
1955 }
1956
1957 static int be_request_irq(struct be_adapter *adapter,
1958                 struct be_eq_obj *eq_obj,
1959                 void *handler, char *desc, void *context)
1960 {
1961         struct net_device *netdev = adapter->netdev;
1962         int vec;
1963
1964         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
1965         vec = be_msix_vec_get(adapter, eq_obj);
1966         return request_irq(vec, handler, 0, eq_obj->desc, context);
1967 }
1968
1969 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
1970                         void *context)
1971 {
1972         int vec = be_msix_vec_get(adapter, eq_obj);
1973         free_irq(vec, context);
1974 }
1975
1976 static int be_msix_register(struct be_adapter *adapter)
1977 {
1978         struct be_rx_obj *rxo;
1979         int status, i;
1980         char qname[10];
1981
1982         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
1983                                 adapter);
1984         if (status)
1985                 goto err;
1986
1987         for_all_rx_queues(adapter, rxo, i) {
1988                 sprintf(qname, "rxq%d", i);
1989                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
1990                                 qname, rxo);
1991                 if (status)
1992                         goto err_msix;
1993         }
1994
1995         return 0;
1996
1997 err_msix:
1998         be_free_irq(adapter, &adapter->tx_eq, adapter);
1999
2000         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2001                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2002
2003 err:
2004         dev_warn(&adapter->pdev->dev,
2005                 "MSIX Request IRQ failed - err %d\n", status);
2006         pci_disable_msix(adapter->pdev);
2007         adapter->msix_enabled = false;
2008         return status;
2009 }
2010
2011 static int be_irq_register(struct be_adapter *adapter)
2012 {
2013         struct net_device *netdev = adapter->netdev;
2014         int status;
2015
2016         if (adapter->msix_enabled) {
2017                 status = be_msix_register(adapter);
2018                 if (status == 0)
2019                         goto done;
2020                 /* INTx is not supported for VF */
2021                 if (!be_physfn(adapter))
2022                         return status;
2023         }
2024
2025         /* INTx */
2026         netdev->irq = adapter->pdev->irq;
2027         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2028                         adapter);
2029         if (status) {
2030                 dev_err(&adapter->pdev->dev,
2031                         "INTx request IRQ failed - err %d\n", status);
2032                 return status;
2033         }
2034 done:
2035         adapter->isr_registered = true;
2036         return 0;
2037 }
2038
2039 static void be_irq_unregister(struct be_adapter *adapter)
2040 {
2041         struct net_device *netdev = adapter->netdev;
2042         struct be_rx_obj *rxo;
2043         int i;
2044
2045         if (!adapter->isr_registered)
2046                 return;
2047
2048         /* INTx */
2049         if (!adapter->msix_enabled) {
2050                 free_irq(netdev->irq, adapter);
2051                 goto done;
2052         }
2053
2054         /* MSIx */
2055         be_free_irq(adapter, &adapter->tx_eq, adapter);
2056
2057         for_all_rx_queues(adapter, rxo, i)
2058                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2059
2060 done:
2061         adapter->isr_registered = false;
2062 }
2063
2064 static int be_close(struct net_device *netdev)
2065 {
2066         struct be_adapter *adapter = netdev_priv(netdev);
2067         struct be_rx_obj *rxo;
2068         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2069         int vec, i;
2070
2071         be_async_mcc_disable(adapter);
2072
2073         netif_stop_queue(netdev);
2074         netif_carrier_off(netdev);
2075         adapter->link_up = false;
2076
2077         if (!lancer_chip(adapter))
2078                 be_intr_set(adapter, false);
2079
2080         if (adapter->msix_enabled) {
2081                 vec = be_msix_vec_get(adapter, tx_eq);
2082                 synchronize_irq(vec);
2083
2084                 for_all_rx_queues(adapter, rxo, i) {
2085                         vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2086                         synchronize_irq(vec);
2087                 }
2088         } else {
2089                 synchronize_irq(netdev->irq);
2090         }
2091         be_irq_unregister(adapter);
2092
2093         for_all_rx_queues(adapter, rxo, i)
2094                 napi_disable(&rxo->rx_eq.napi);
2095
2096         napi_disable(&tx_eq->napi);
2097
2098         /* Wait for all pending tx completions to arrive so that
2099          * all tx skbs are freed.
2100          */
2101         be_tx_compl_clean(adapter);
2102
2103         return 0;
2104 }
2105
2106 static int be_open(struct net_device *netdev)
2107 {
2108         struct be_adapter *adapter = netdev_priv(netdev);
2109         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2110         struct be_rx_obj *rxo;
2111         bool link_up;
2112         int status, i;
2113         u8 mac_speed;
2114         u16 link_speed;
2115
2116         for_all_rx_queues(adapter, rxo, i) {
2117                 be_post_rx_frags(rxo);
2118                 napi_enable(&rxo->rx_eq.napi);
2119         }
2120         napi_enable(&tx_eq->napi);
2121
2122         be_irq_register(adapter);
2123
2124         if (!lancer_chip(adapter))
2125                 be_intr_set(adapter, true);
2126
2127         /* The evt queues are created in unarmed state; arm them */
2128         for_all_rx_queues(adapter, rxo, i) {
2129                 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2130                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2131         }
2132         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2133
2134         /* Now that interrupts are on we can process async mcc */
2135         be_async_mcc_enable(adapter);
2136
2137         status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2138                         &link_speed);
2139         if (status)
2140                 goto err;
2141         be_link_status_update(adapter, link_up);
2142
2143         if (be_physfn(adapter)) {
2144                 status = be_vid_config(adapter, false, 0);
2145                 if (status)
2146                         goto err;
2147
2148                 status = be_cmd_set_flow_control(adapter,
2149                                 adapter->tx_fc, adapter->rx_fc);
2150                 if (status)
2151                         goto err;
2152         }
2153
2154         return 0;
2155 err:
2156         be_close(adapter->netdev);
2157         return -EIO;
2158 }
2159
2160 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2161 {
2162         struct be_dma_mem cmd;
2163         int status = 0;
2164         u8 mac[ETH_ALEN];
2165
2166         memset(mac, 0, ETH_ALEN);
2167
2168         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2169         cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
2170         if (cmd.va == NULL)
2171                 return -1;
2172         memset(cmd.va, 0, cmd.size);
2173
2174         if (enable) {
2175                 status = pci_write_config_dword(adapter->pdev,
2176                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2177                 if (status) {
2178                         dev_err(&adapter->pdev->dev,
2179                                 "Could not enable Wake-on-LAN\n");
2180                         pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
2181                                         cmd.dma);
2182                         return status;
2183                 }
2184                 status = be_cmd_enable_magic_wol(adapter,
2185                                 adapter->netdev->dev_addr, &cmd);
2186                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2187                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2188         } else {
2189                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2190                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2191                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2192         }
2193
2194         pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2195         return status;
2196 }
2197
2198 /*
2199  * Generate a seed MAC address from the PF MAC address using jhash.
2200  * MAC addresses for VFs are assigned incrementally starting from the seed.
2201  * These addresses are programmed in the ASIC by the PF and the VF driver
2202  * queries for the MAC address during its probe.
2203  */
2204 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2205 {
2206         u32 vf = 0;
2207         int status = 0;
2208         u8 mac[ETH_ALEN];
2209
2210         be_vf_eth_addr_generate(adapter, mac);
2211
2212         for (vf = 0; vf < num_vfs; vf++) {
2213                 status = be_cmd_pmac_add(adapter, mac,
2214                                         adapter->vf_cfg[vf].vf_if_handle,
2215                                         &adapter->vf_cfg[vf].vf_pmac_id);
2216                 if (status)
2217                         dev_err(&adapter->pdev->dev,
2218                                 "Mac address add failed for VF %d\n", vf);
2219                 else
2220                         memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2221
2222                 mac[5] += 1;
2223         }
2224         return status;
2225 }
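
/* "mac[5] += 1" above bumps only the last octet, so the scheme assumes
 * fewer than 256 VFs and no wrap-around into a colliding address.  A
 * carry-aware increment would look like this (sketch only; it deliberately
 * stops before carrying into the OUI bytes):
 */
#if 0
static void mac_addr_increment(u8 mac[ETH_ALEN])
{
        int i;

        for (i = ETH_ALEN - 1; i >= 3; i--)
                if (++mac[i])
                        break;  /* no wrap to zero: carry is resolved */
}
#endif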
2226
2227 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2228 {
2229         u32 vf;
2230
2231         for (vf = 0; vf < num_vfs; vf++) {
2232                 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2233                         be_cmd_pmac_del(adapter,
2234                                         adapter->vf_cfg[vf].vf_if_handle,
2235                                         adapter->vf_cfg[vf].vf_pmac_id);
2236         }
2237 }
2238
2239 static int be_setup(struct be_adapter *adapter)
2240 {
2241         struct net_device *netdev = adapter->netdev;
2242         u32 cap_flags, en_flags, vf = 0;
2243         int status;
2244         u8 mac[ETH_ALEN];
2245
2246         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
2247
2248         if (be_physfn(adapter)) {
2249                 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2250                                 BE_IF_FLAGS_PROMISCUOUS |
2251                                 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2252                 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2253
2254                 if (be_multi_rxq(adapter)) {
2255                         cap_flags |= BE_IF_FLAGS_RSS;
2256                         en_flags |= BE_IF_FLAGS_RSS;
2257                 }
2258         }
2259
2260         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2261                         netdev->dev_addr, false/* pmac_invalid */,
2262                         &adapter->if_handle, &adapter->pmac_id, 0);
2263         if (status != 0)
2264                 goto do_none;
2265
2266         if (be_physfn(adapter)) {
2267                 while (vf < num_vfs) {
2268                         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
2269                                         | BE_IF_FLAGS_BROADCAST;
2270                         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2271                                         mac, true,
2272                                         &adapter->vf_cfg[vf].vf_if_handle,
2273                                         NULL, vf+1);
2274                         if (status) {
2275                                 dev_err(&adapter->pdev->dev,
2276                                 "Interface Create failed for VF %d\n", vf);
2277                                 goto if_destroy;
2278                         }
2279                         adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
2280                         vf++;
2281                 }
2282         } else if (!be_physfn(adapter)) {
2283                 status = be_cmd_mac_addr_query(adapter, mac,
2284                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2285                 if (!status) {
2286                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2287                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2288                 }
2289         }
2290
2291         status = be_tx_queues_create(adapter);
2292         if (status != 0)
2293                 goto if_destroy;
2294
2295         status = be_rx_queues_create(adapter);
2296         if (status != 0)
2297                 goto tx_qs_destroy;
2298
2299         status = be_mcc_queues_create(adapter);
2300         if (status != 0)
2301                 goto rx_qs_destroy;
2302
2303         if (be_physfn(adapter)) {
2304                 status = be_vf_eth_addr_config(adapter);
2305                 if (status)
2306                         goto mcc_q_destroy;
2307         }
2308
2309         adapter->link_speed = -1;
2310
2311         return 0;
2312
2313 mcc_q_destroy:
2314         if (be_physfn(adapter))
2315                 be_vf_eth_addr_rem(adapter);
2316         be_mcc_queues_destroy(adapter);
2317 rx_qs_destroy:
2318         be_rx_queues_destroy(adapter);
2319 tx_qs_destroy:
2320         be_tx_queues_destroy(adapter);
2321 if_destroy:
2322         for (vf = 0; vf < num_vfs; vf++)
2323                 if (adapter->vf_cfg[vf].vf_if_handle)
2324                         be_cmd_if_destroy(adapter,
2325                                         adapter->vf_cfg[vf].vf_if_handle);
2326         be_cmd_if_destroy(adapter, adapter->if_handle);
2327 do_none:
2328         return status;
2329 }
2330
2331 static int be_clear(struct be_adapter *adapter)
2332 {
2333         if (be_physfn(adapter))
2334                 be_vf_eth_addr_rem(adapter);
2335
2336         be_mcc_queues_destroy(adapter);
2337         be_rx_queues_destroy(adapter);
2338         be_tx_queues_destroy(adapter);
2339
2340         be_cmd_if_destroy(adapter, adapter->if_handle);
2341
2342         /* tell fw we're done with firing cmds */
2343         be_cmd_fw_clean(adapter);
2344         return 0;
2345 }
2346
2347
2348 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2349 static bool be_flash_redboot(struct be_adapter *adapter,
2350                         const u8 *p, u32 img_start, int image_size,
2351                         int hdr_size)
2352 {
2353         u32 crc_offset;
2354         u8 flashed_crc[4];
2355         int status;
2356
2357         crc_offset = hdr_size + img_start + image_size - 4;
2358
2359         p += crc_offset;
2360
2361         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2362                         (image_size - 4));
2363         if (status) {
2364                 dev_err(&adapter->pdev->dev,
2365                 "could not get crc from flash, not flashing redboot\n");
2366                 return false;
2367         }
2368
2369         /* update redboot only if crc does not match */
2370         if (!memcmp(flashed_crc, p, 4))
2371                 return false;
2372         else
2373                 return true;
2374 }
2375
2376 static int be_flash_data(struct be_adapter *adapter,
2377                         const struct firmware *fw,
2378                         struct be_dma_mem *flash_cmd, int num_of_images)
2380 {
2381         int status = 0, i, filehdr_size = 0;
2382         u32 total_bytes = 0, flash_op;
2383         int num_bytes;
2384         const u8 *p = fw->data;
2385         struct be_cmd_write_flashrom *req = flash_cmd->va;
2386         struct flash_comp *pflashcomp;
2387         int num_comp;
2388
2389         struct flash_comp gen3_flash_types[9] = {
2390                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2391                         FLASH_IMAGE_MAX_SIZE_g3},
2392                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2393                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2394                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2395                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2396                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2397                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2398                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2399                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2400                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2401                         FLASH_IMAGE_MAX_SIZE_g3},
2402                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2403                         FLASH_IMAGE_MAX_SIZE_g3},
2404                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2405                         FLASH_IMAGE_MAX_SIZE_g3},
2406                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2407                         FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2408         };
2409         struct flash_comp gen2_flash_types[8] = {
2410                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2411                         FLASH_IMAGE_MAX_SIZE_g2},
2412                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2413                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2414                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2415                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2416                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2417                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2418                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2419                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2420                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2421                         FLASH_IMAGE_MAX_SIZE_g2},
2422                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2423                         FLASH_IMAGE_MAX_SIZE_g2},
2424                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2425                          FLASH_IMAGE_MAX_SIZE_g2}
2426         };
2427
2428         if (adapter->generation == BE_GEN3) {
2429                 pflashcomp = gen3_flash_types;
2430                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2431                 num_comp = 9;
2432         } else {
2433                 pflashcomp = gen2_flash_types;
2434                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2435                 num_comp = 8;
2436         }
2437         for (i = 0; i < num_comp; i++) {
2438                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2439                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2440                         continue;
2441                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2442                         (!be_flash_redboot(adapter, fw->data,
2443                          pflashcomp[i].offset, pflashcomp[i].size,
2444                          filehdr_size)))
2445                         continue;
2446                 p = fw->data;
2447                 p += filehdr_size + pflashcomp[i].offset
2448                         + (num_of_images * sizeof(struct image_hdr));
2449                 if (p + pflashcomp[i].size > fw->data + fw->size)
2450                         return -1;
2451                 total_bytes = pflashcomp[i].size;
2452                 while (total_bytes) {
2453                         if (total_bytes > 32*1024)
2454                                 num_bytes = 32*1024;
2455                         else
2456                                 num_bytes = total_bytes;
2457                         total_bytes -= num_bytes;
2458
2459                         if (!total_bytes)
2460                                 flash_op = FLASHROM_OPER_FLASH;
2461                         else
2462                                 flash_op = FLASHROM_OPER_SAVE;
2463                         memcpy(req->params.data_buf, p, num_bytes);
2464                         p += num_bytes;
2465                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2466                                 pflashcomp[i].optype, flash_op, num_bytes);
2467                         if (status) {
2468                                 dev_err(&adapter->pdev->dev,
2469                                         "cmd to write to flash rom failed.\n");
2470                                 return -1;
2471                         }
2472                         yield();
2473                 }
2474         }
2475         return 0;
2476 }
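
/* be_flash_data() streams each component to the card in 32KB pieces; every
 * piece except the last is sent with FLASHROM_OPER_SAVE (buffer on the
 * card) and the final one with FLASHROM_OPER_FLASH (commit the image).
 * The inner loop, reduced to its core (write_chunk() is hypothetical):
 */
#if 0
        while (total_bytes) {
                num_bytes = min_t(u32, total_bytes, 32 * 1024);
                total_bytes -= num_bytes;
                flash_op = total_bytes ? FLASHROM_OPER_SAVE :
                                         FLASHROM_OPER_FLASH;
                write_chunk(p, num_bytes, flash_op);
                p += num_bytes;
        }
#endif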
2477
2478 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2479 {
2480         if (fhdr == NULL)
2481                 return 0;
2482         if (fhdr->build[0] == '3')
2483                 return BE_GEN3;
2484         else if (fhdr->build[0] == '2')
2485                 return BE_GEN2;
2486         else
2487                 return 0;
2488 }
2489
2490 int be_load_fw(struct be_adapter *adapter, u8 *func)
2491 {
2492         char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2493         const struct firmware *fw;
2494         struct flash_file_hdr_g2 *fhdr;
2495         struct flash_file_hdr_g3 *fhdr3;
2496         struct image_hdr *img_hdr_ptr = NULL;
2497         struct be_dma_mem flash_cmd;
2498         int status, i = 0, num_imgs = 0;
2499         const u8 *p;
2500
2501         if (!netif_running(adapter->netdev)) {
2502                 dev_err(&adapter->pdev->dev,
2503                         "Firmware load not allowed (interface is down)\n");
2504                 return -EPERM;
2505         }
2506
2507         strcpy(fw_file, func);
2508
2509         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2510         if (status)
2511                 goto fw_exit;
2512
2513         p = fw->data;
2514         fhdr = (struct flash_file_hdr_g2 *) p;
2515         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2516
2517         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2518         flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
2519                                         &flash_cmd.dma);
2520         if (!flash_cmd.va) {
2521                 status = -ENOMEM;
2522                 dev_err(&adapter->pdev->dev,
2523                         "Memory allocation failure while flashing\n");
2524                 goto fw_exit;
2525         }
2526
2527         if ((adapter->generation == BE_GEN3) &&
2528                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2529                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2530                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2531                 for (i = 0; i < num_imgs; i++) {
2532                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2533                                         (sizeof(struct flash_file_hdr_g3) +
2534                                          i * sizeof(struct image_hdr)));
2535                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2536                                 status = be_flash_data(adapter, fw, &flash_cmd,
2537                                                         num_imgs);
2538                 }
2539         } else if ((adapter->generation == BE_GEN2) &&
2540                         (get_ufigen_type(fhdr) == BE_GEN2)) {
2541                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2542         } else {
2543                 dev_err(&adapter->pdev->dev,
2544                         "UFI and Interface are not compatible for flashing\n");
2545                 status = -1;
2546         }
2547
2548         pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
2549                                 flash_cmd.dma);
2550         if (status) {
2551                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2552                 goto fw_exit;
2553         }
2554
2555         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2556
2557 fw_exit:
2558         release_firmware(fw);
2559         return status;
2560 }
2561
2562 static struct net_device_ops be_netdev_ops = {
2563         .ndo_open               = be_open,
2564         .ndo_stop               = be_close,
2565         .ndo_start_xmit         = be_xmit,
2566         .ndo_set_rx_mode        = be_set_multicast_list,
2567         .ndo_set_mac_address    = be_mac_addr_set,
2568         .ndo_change_mtu         = be_change_mtu,
2569         .ndo_validate_addr      = eth_validate_addr,
2570         .ndo_vlan_rx_register   = be_vlan_register,
2571         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
2572         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
2573         .ndo_set_vf_mac         = be_set_vf_mac,
2574         .ndo_set_vf_vlan        = be_set_vf_vlan,
2575         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
2576         .ndo_get_vf_config      = be_get_vf_config
2577 };
2578
2579 static void be_netdev_init(struct net_device *netdev)
2580 {
2581         struct be_adapter *adapter = netdev_priv(netdev);
2582         struct be_rx_obj *rxo;
2583         int i;
2584
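        /* Advertise the protocol-specific checksum offloads rather than
         * NETIF_F_HW_CSUM: the ASIC checksums only TCP/UDP over IPv4/IPv6,
         * so claiming HW_CSUM (checksumming of arbitrary protocols) would
         * be too optimistic.
         */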
2585         netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
2586                 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
2587                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2588                 NETIF_F_GRO | NETIF_F_TSO6;
2589
2590         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
2591                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2592
2593         if (lancer_chip(adapter))
2594                 netdev->vlan_features |= NETIF_F_TSO6;
2595
2596         netdev->flags |= IFF_MULTICAST;
2597
2598         adapter->rx_csum = true;
2599
2600         /* Default settings for Rx and Tx flow control */
2601         adapter->rx_fc = true;
2602         adapter->tx_fc = true;
2603
2604         netif_set_gso_max_size(netdev, 65535);
2605
2606         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2607
2608         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2609
2610         for_all_rx_queues(adapter, rxo, i)
2611                 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2612                                 BE_NAPI_WEIGHT);
2613
2614         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2615                 BE_NAPI_WEIGHT);
2616
2617         netif_stop_queue(netdev);
2618 }
2619
2620 static void be_unmap_pci_bars(struct be_adapter *adapter)
2621 {
2622         if (adapter->csr)
2623                 iounmap(adapter->csr);
2624         if (adapter->db)
2625                 iounmap(adapter->db);
2626         if (adapter->pcicfg && be_physfn(adapter))
2627                 iounmap(adapter->pcicfg);
2628 }
2629
2630 static int be_map_pci_bars(struct be_adapter *adapter)
2631 {
2632         u8 __iomem *addr;
2633         int pcicfg_reg, db_reg;
2634
2635         if (lancer_chip(adapter)) {
2636                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2637                         pci_resource_len(adapter->pdev, 0));
2638                 if (addr == NULL)
2639                         return -ENOMEM;
2640                 adapter->db = addr;
2641                 return 0;
2642         }
2643
2644         if (be_physfn(adapter)) {
2645                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2646                                 pci_resource_len(adapter->pdev, 2));
2647                 if (addr == NULL)
2648                         return -ENOMEM;
2649                 adapter->csr = addr;
2650         }
2651
2652         if (adapter->generation == BE_GEN2) {
2653                 pcicfg_reg = 1;
2654                 db_reg = 4;
2655         } else {
2656                 pcicfg_reg = 0;
2657                 if (be_physfn(adapter))
2658                         db_reg = 4;
2659                 else
2660                         db_reg = 0;
2661         }
2662         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2663                                 pci_resource_len(adapter->pdev, db_reg));
2664         if (addr == NULL)
2665                 goto pci_map_err;
2666         adapter->db = addr;
2667
2668         if (be_physfn(adapter)) {
2669                 addr = ioremap_nocache(
2670                                 pci_resource_start(adapter->pdev, pcicfg_reg),
2671                                 pci_resource_len(adapter->pdev, pcicfg_reg));
2672                 if (addr == NULL)
2673                         goto pci_map_err;
2674                 adapter->pcicfg = addr;
2675         } else
2676                 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
2677
2678         return 0;
2679 pci_map_err:
2680         be_unmap_pci_bars(adapter);
2681         return -ENOMEM;
2682 }
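
/* BAR usage as mapped above: Lancer exposes only the doorbells, in BAR 0.
 * Otherwise the CSR is BAR 2 (PF only); the doorbells are BAR 4 (BAR 0 for
 * BE3 VFs); and pcicfg is BAR 1 on BE2 and BAR 0 on BE3 for PFs, while VFs
 * reach it at db + SRIOV_VF_PCICFG_OFFSET.
 */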
2683
2684
2685 static void be_ctrl_cleanup(struct be_adapter *adapter)
2686 {
2687         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
2688
2689         be_unmap_pci_bars(adapter);
2690
2691         if (mem->va)
2692                 pci_free_consistent(adapter->pdev, mem->size,
2693                         mem->va, mem->dma);
2694
2695         mem = &adapter->mc_cmd_mem;
2696         if (mem->va)
2697                 pci_free_consistent(adapter->pdev, mem->size,
2698                         mem->va, mem->dma);
2699 }
2700
2701 static int be_ctrl_init(struct be_adapter *adapter)
2702 {
2703         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2704         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
2705         struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
2706         int status;
2707
2708         status = be_map_pci_bars(adapter);
2709         if (status)
2710                 goto done;
2711
2712         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2713         mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
2714                                 mbox_mem_alloc->size, &mbox_mem_alloc->dma);
2715         if (!mbox_mem_alloc->va) {
2716                 status = -ENOMEM;
2717                 goto unmap_pci_bars;
2718         }
2719
2720         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2721         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2722         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2723         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
2724
2725         mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2726         mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
2727                         &mc_cmd_mem->dma);
2728         if (mc_cmd_mem->va == NULL) {
2729                 status = -ENOMEM;
2730                 goto free_mbox;
2731         }
2732         memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2733
2734         spin_lock_init(&adapter->mbox_lock);
2735         spin_lock_init(&adapter->mcc_lock);
2736         spin_lock_init(&adapter->mcc_cq_lock);
2737
2738         init_completion(&adapter->flash_compl);
2739         pci_save_state(adapter->pdev);
2740         return 0;
2741
2742 free_mbox:
2743         pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
2744                 mbox_mem_alloc->va, mbox_mem_alloc->dma);
2745
2746 unmap_pci_bars:
2747         be_unmap_pci_bars(adapter);
2748
2749 done:
2750         return status;
2751 }
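
/* Mailbox alignment note: the MCC mailbox is kept 16-byte aligned (hence
 * the +16 over-allocation and PTR_ALIGN on both the CPU and bus addresses
 * above).  Aligning the two independently is consistent here because
 * pci_alloc_consistent() returns (at least) page-aligned memory, so both
 * addresses start with the same zero low-order bits.
 */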
2752
2753 static void be_stats_cleanup(struct be_adapter *adapter)
2754 {
2755         struct be_dma_mem *cmd = &adapter->stats_cmd;
2756
2757         if (cmd->va)
2758                 pci_free_consistent(adapter->pdev, cmd->size,
2759                         cmd->va, cmd->dma);
2760 }
2761
2762 static int be_stats_init(struct be_adapter *adapter)
2763 {
2764         struct be_dma_mem *cmd = &adapter->stats_cmd;
2765
2766         cmd->size = sizeof(struct be_cmd_req_get_stats);
2767         cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
2768         if (cmd->va == NULL)
2769                 return -1;
2770         memset(cmd->va, 0, cmd->size);
2771         return 0;
2772 }
2773
2774 static void __devexit be_remove(struct pci_dev *pdev)
2775 {
2776         struct be_adapter *adapter = pci_get_drvdata(pdev);
2777
2778         if (!adapter)
2779                 return;
2780
2781         cancel_delayed_work_sync(&adapter->work);
2782
2783         unregister_netdev(adapter->netdev);
2784
2785         be_clear(adapter);
2786
2787         be_stats_cleanup(adapter);
2788
2789         be_ctrl_cleanup(adapter);
2790
2791         be_sriov_disable(adapter);
2792
2793         be_msix_disable(adapter);
2794
2795         pci_set_drvdata(pdev, NULL);
2796         pci_release_regions(pdev);
2797         pci_disable_device(pdev);
2798
2799         free_netdev(adapter->netdev);
2800 }
2801
2802 static int be_get_config(struct be_adapter *adapter)
2803 {
2804         int status;
2805         u8 mac[ETH_ALEN];
2806
2807         status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
2808         if (status)
2809                 return status;
2810
2811         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2812                         &adapter->function_mode, &adapter->function_caps);
2813         if (status)
2814                 return status;
2815
2816         memset(mac, 0, ETH_ALEN);
2817
2818         if (be_physfn(adapter)) {
2819                 status = be_cmd_mac_addr_query(adapter, mac,
2820                         MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
2821
2822                 if (status)
2823                         return status;
2824
2825                 if (!is_valid_ether_addr(mac))
2826                         return -EADDRNOTAVAIL;
2827
2828                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2829                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2830         }
2831
2832         if (adapter->function_mode & 0x400)
2833                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
2834         else
2835                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2836
2837         return 0;
2838 }
2839
2840 static int be_dev_family_check(struct be_adapter *adapter)
2841 {
2842         struct pci_dev *pdev = adapter->pdev;
2843         u32 sli_intf = 0, if_type;
2844
2845         switch (pdev->device) {
2846         case BE_DEVICE_ID1:
2847         case OC_DEVICE_ID1:
2848                 adapter->generation = BE_GEN2;
2849                 break;
2850         case BE_DEVICE_ID2:
2851         case OC_DEVICE_ID2:
2852                 adapter->generation = BE_GEN3;
2853                 break;
2854         case OC_DEVICE_ID3:
2855                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
2856                 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
2857                                                 SLI_INTF_IF_TYPE_SHIFT;
2858
2859                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
2860                         if_type != 0x02) {
2861                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
2862                         return -EINVAL;
2863                 }
2864                 if (num_vfs > 0) {
2865                         dev_err(&pdev->dev, "VFs not supported\n");
2866                         return -EINVAL;
2867                 }
2868                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
2869                                          SLI_INTF_FAMILY_SHIFT);
2870                 adapter->generation = BE_GEN3;
2871                 break;
2872         default:
2873                 adapter->generation = 0;
2874         }
2875         return 0;
2876 }
2877
2878 static int __devinit be_probe(struct pci_dev *pdev,
2879                         const struct pci_device_id *pdev_id)
2880 {
2881         int status = 0;
2882         struct be_adapter *adapter;
2883         struct net_device *netdev;
2884
2885         status = pci_enable_device(pdev);
2886         if (status)
2887                 goto do_none;
2888
2889         status = pci_request_regions(pdev, DRV_NAME);
2890         if (status)
2891                 goto disable_dev;
2892         pci_set_master(pdev);
2893
2894         netdev = alloc_etherdev(sizeof(struct be_adapter));
2895         if (netdev == NULL) {
2896                 status = -ENOMEM;
2897                 goto rel_reg;
2898         }
2899         adapter = netdev_priv(netdev);
2900         adapter->pdev = pdev;
2901         pci_set_drvdata(pdev, adapter);
2902
2903         status = be_dev_family_check(adapter);
2904         if (status)
2905                 goto free_netdev;
2906
2907         adapter->netdev = netdev;
2908         SET_NETDEV_DEV(netdev, &pdev->dev);
2909
2910         status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2911         if (!status) {
2912                 netdev->features |= NETIF_F_HIGHDMA;
2913         } else {
2914                 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2915                 if (status) {
2916                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
2917                         goto free_netdev;
2918                 }
2919         }
2920
2921         be_sriov_enable(adapter);
2922
2923         status = be_ctrl_init(adapter);
2924         if (status)
2925                 goto free_netdev;
2926
2927         /* sync up with fw's ready state */
2928         if (be_physfn(adapter)) {
2929                 status = be_cmd_POST(adapter);
2930                 if (status)
2931                         goto ctrl_clean;
2932         }
2933
2934         /* tell fw we're ready to fire cmds */
2935         status = be_cmd_fw_init(adapter);
2936         if (status)
2937                 goto ctrl_clean;
2938
2939         if (be_physfn(adapter)) {
2940                 status = be_cmd_reset_function(adapter);
2941                 if (status)
2942                         goto ctrl_clean;
2943         }
2944
2945         status = be_stats_init(adapter);
2946         if (status)
2947                 goto ctrl_clean;
2948
2949         status = be_get_config(adapter);
2950         if (status)
2951                 goto stats_clean;
2952
2953         be_msix_enable(adapter);
2954
2955         INIT_DELAYED_WORK(&adapter->work, be_worker);
2956
2957         status = be_setup(adapter);
2958         if (status)
2959                 goto msix_disable;
2960
2961         be_netdev_init(netdev);
2962         status = register_netdev(netdev);
2963         if (status != 0)
2964                 goto unsetup;
2965         netif_carrier_off(netdev);
2966
2967         dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
2968         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
2969         return 0;
2970
2971 unsetup:
2972         be_clear(adapter);
2973 msix_disable:
2974         be_msix_disable(adapter);
2975 stats_clean:
2976         be_stats_cleanup(adapter);
2977 ctrl_clean:
2978         be_ctrl_cleanup(adapter);
2979 free_netdev:
2980         be_sriov_disable(adapter);
2981         free_netdev(netdev);
2982         pci_set_drvdata(pdev, NULL);
2983 rel_reg:
2984         pci_release_regions(pdev);
2985 disable_dev:
2986         pci_disable_device(pdev);
2987 do_none:
2988         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
2989         return status;
2990 }
2991
2992 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
2993 {
2994         struct be_adapter *adapter = pci_get_drvdata(pdev);
2995         struct net_device *netdev = adapter->netdev;
2996
2997         if (adapter->wol)
2998                 be_setup_wol(adapter, true);
2999
3000         netif_device_detach(netdev);
3001         if (netif_running(netdev)) {
3002                 rtnl_lock();
3003                 be_close(netdev);
3004                 rtnl_unlock();
3005         }
3006         be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
3007         be_clear(adapter);
3008
3009         pci_save_state(pdev);
3010         pci_disable_device(pdev);
3011         pci_set_power_state(pdev, pci_choose_state(pdev, state));
3012         return 0;
3013 }
3014
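/*
 * PCI resume: re-enable the device, return it to D0, re-initialize the
 * firmware command path and rebuild the rings, then re-open the
 * interface and disarm wake-on-lan.
 */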
static int be_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        netif_device_detach(netdev);

        status = pci_enable_device(pdev);
        if (status)
                return status;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                return status;

        status = be_setup(adapter);
        if (status)
                return status;

        if (netif_running(netdev)) {
                rtnl_lock();
                be_open(netdev);
                rtnl_unlock();
        }
        netif_device_attach(netdev);

        if (adapter->wol)
                be_setup_wol(adapter, false);
        return 0;
}

/*
 * A function-level reset (issued via be_cmd_reset_function) stops the
 * controller from DMAing any further data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        netif_device_detach(netdev);

        be_cmd_reset_function(adapter);

        if (adapter->wol)
                be_setup_wol(adapter, true);

        pci_disable_device(pdev);
}

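/*
 * Invoked by the PCI core when an EEH error is detected on the channel.
 * Quiesce the interface and free the rings, then tell the core whether
 * to reset the slot or disconnect the device permanently.
 */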
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
                                pci_channel_state_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_err(&adapter->pdev->dev, "EEH error detected\n");

        adapter->eeh_err = true;

        netif_device_detach(netdev);

        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_clear(adapter);

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_disable_device(pdev);

        return PCI_ERS_RESULT_NEED_RESET;
}

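/*
 * Invoked after the slot has been reset: re-enable the device and POST
 * the firmware to verify that the adapter came back healthy.
 */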
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        int status;

        dev_info(&adapter->pdev->dev, "EEH reset\n");
        adapter->eeh_err = false;

        status = pci_enable_device(pdev);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_set_master(pdev);
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        /* Check if card is ok and fw is ready */
        status = be_cmd_POST(adapter);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        return PCI_ERS_RESULT_RECOVERED;
}

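/*
 * Invoked when traffic may resume after a successful slot reset:
 * re-initialize the firmware command path, rebuild the rings and
 * re-attach the interface.
 */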
static void be_eeh_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_info(&adapter->pdev->dev, "EEH resume\n");

        pci_save_state(pdev);

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto err;

        status = be_setup(adapter);
        if (status)
                goto err;

        if (netif_running(netdev)) {
                status = be_open(netdev);
                if (status)
                        goto err;
        }
        netif_device_attach(netdev);
        return;
err:
        dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

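/* EEH callbacks invoked by the PCI core during error recovery */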
static struct pci_error_handlers be_eeh_handlers = {
        .error_detected = be_eeh_err_detected,
        .slot_reset = be_eeh_reset,
        .resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
        .name = DRV_NAME,
        .id_table = be_dev_ids,
        .probe = be_probe,
        .remove = be_remove,
        .suspend = be_suspend,
        .resume = be_resume,
        .shutdown = be_shutdown,
        .err_handler = &be_eeh_handlers
};

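/* Validate module parameters before registering the PCI driver */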
static int __init be_init_module(void)
{
        if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
            rx_frag_size != 2048) {
                printk(KERN_WARNING DRV_NAME
                        " : Module param rx_frag_size must be 2048/4096/8192."
                        " Using 2048\n");
                rx_frag_size = 2048;
        }

        if (num_vfs > 32) {
                printk(KERN_WARNING DRV_NAME
                        " : Module param num_vfs must not be greater than 32."
                        " Using 32\n");
                num_vfs = 32;
        }

        return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
        pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);