be2net: variable name change
drivers/net/benet/be_main.c (firefly-linux-kernel-4.4.55.git)
1 /*
2  * Copyright (C) 2005 - 2010 ServerEngines
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@serverengines.com
12  *
13  * ServerEngines
14  * 209 N. Fair Oaks Ave
15  * Sunnyvale, CA 94085
16  */
17
18 #include "be.h"
19 #include "be_cmds.h"
20 #include <asm/div64.h>
21
22 MODULE_VERSION(DRV_VER);
24 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
25 MODULE_AUTHOR("ServerEngines Corporation");
26 MODULE_LICENSE("GPL");
27
28 static unsigned int rx_frag_size = 2048;
29 static unsigned int num_vfs;
30 module_param(rx_frag_size, uint, S_IRUGO);
31 module_param(num_vfs, uint, S_IRUGO);
32 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
33 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
34
35 static bool multi_rxq = true;
36 module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
37 MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");
38
39 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
40         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
41         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
42         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
44         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
45         { 0 }
46 };
47 MODULE_DEVICE_TABLE(pci, be_dev_ids);
48 /* UE Status Low CSR */
49 static char *ue_status_low_desc[] = {
50         "CEV",
51         "CTX",
52         "DBUF",
53         "ERX",
54         "Host",
55         "MPU",
56         "NDMA",
57         "PTC ",
58         "RDMA ",
59         "RXF ",
60         "RXIPS ",
61         "RXULP0 ",
62         "RXULP1 ",
63         "RXULP2 ",
64         "TIM ",
65         "TPOST ",
66         "TPRE ",
67         "TXIPS ",
68         "TXULP0 ",
69         "TXULP1 ",
70         "UC ",
71         "WDMA ",
72         "TXULP2 ",
73         "HOST1 ",
74         "P0_OB_LINK ",
75         "P1_OB_LINK ",
76         "HOST_GPIO ",
77         "MBOX ",
78         "AXGMAC0",
79         "AXGMAC1",
80         "JTAG",
81         "MPU_INTPEND"
82 };
83 /* UE Status High CSR */
84 static char *ue_status_hi_desc[] = {
85         "LPCMEMHOST",
86         "MGMT_MAC",
87         "PCS0ONLINE",
88         "MPU_IRAM",
89         "PCS1ONLINE",
90         "PCTL0",
91         "PCTL1",
92         "PMEM",
93         "RR",
94         "TXPB",
95         "RXPP",
96         "XAUI",
97         "TXP",
98         "ARM",
99         "IPC",
100         "HOST2",
101         "HOST3",
102         "HOST4",
103         "HOST5",
104         "HOST6",
105         "HOST7",
106         "HOST8",
107         "HOST9",
108         "NETC",
109         "Unknown",
110         "Unknown",
111         "Unknown",
112         "Unknown",
113         "Unknown",
114         "Unknown",
115         "Unknown",
116         "Unknown"
117 };
118
119 static inline bool be_multi_rxq(struct be_adapter *adapter)
120 {
121         return (adapter->num_rx_qs > 1);
122 }
123
124 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
125 {
126         struct be_dma_mem *mem = &q->dma_mem;
127         if (mem->va)
128                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
129                                   mem->dma);
130 }
131
132 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
133                 u16 len, u16 entry_size)
134 {
135         struct be_dma_mem *mem = &q->dma_mem;
136
137         memset(q, 0, sizeof(*q));
138         q->len = len;
139         q->entry_size = entry_size;
140         mem->size = len * entry_size;
141         mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
142                                      GFP_KERNEL);
143         if (!mem->va)
144                 return -1;
145         memset(mem->va, 0, mem->size);
146         return 0;
147 }
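
/* Example (illustrative): be_queue_alloc(adapter, q, 256, 16) requests a
 * single 4KB coherent DMA buffer (256 entries * 16 bytes), zeroes it and
 * records the bus address in q->dma_mem.dma; be_queue_free() undoes this.
 */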
148
149 static void be_intr_set(struct be_adapter *adapter, bool enable)
150 {
151         u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
152         u32 reg = ioread32(addr);
153         u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
154
155         if (adapter->eeh_err)
156                 return;
157
158         if (!enabled && enable)
159                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
160         else if (enabled && !enable)
161                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162         else
163                 return;
164
165         iowrite32(reg, addr);
166 }
167
168 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
169 {
170         u32 val = 0;
171         val |= qid & DB_RQ_RING_ID_MASK;
172         val |= posted << DB_RQ_NUM_POSTED_SHIFT;
173
174         wmb();
175         iowrite32(val, adapter->db + DB_RQ_OFFSET);
176 }
177
178 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
179 {
180         u32 val = 0;
181         val |= qid & DB_TXULP_RING_ID_MASK;
182         val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
183
184         wmb();
185         iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
186 }
187
188 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
189                 bool arm, bool clear_int, u16 num_popped)
190 {
191         u32 val = 0;
192         val |= qid & DB_EQ_RING_ID_MASK;
193         val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
194                         DB_EQ_RING_ID_EXT_MASK_SHIFT);
195
196         if (adapter->eeh_err)
197                 return;
198
199         if (arm)
200                 val |= 1 << DB_EQ_REARM_SHIFT;
201         if (clear_int)
202                 val |= 1 << DB_EQ_CLR_SHIFT;
203         val |= 1 << DB_EQ_EVNT_SHIFT;
204         val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
205         iowrite32(val, adapter->db + DB_EQ_OFFSET);
206 }
207
208 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
209 {
210         u32 val = 0;
211         val |= qid & DB_CQ_RING_ID_MASK;
212         val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
213                         DB_CQ_RING_ID_EXT_MASK_SHIFT);
214
215         if (adapter->eeh_err)
216                 return;
217
218         if (arm)
219                 val |= 1 << DB_CQ_REARM_SHIFT;
220         val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
221         iowrite32(val, adapter->db + DB_CQ_OFFSET);
222 }
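
/* Doorbell layout, for illustration: each notify helper above packs a single
 * 32-bit word -- the ring id in the low bits, event/rearm/clear-interrupt
 * flags and the count of posted or consumed entries in higher bit-fields --
 * and writes it to that ring type's offset in the doorbell BAR. E.g.
 * re-arming CQ 5 after reaping 3 completions:
 *      be_cq_notify(adapter, 5, true, 3);
 */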
223
224 static int be_mac_addr_set(struct net_device *netdev, void *p)
225 {
226         struct be_adapter *adapter = netdev_priv(netdev);
227         struct sockaddr *addr = p;
228         int status = 0;
229
230         if (!is_valid_ether_addr(addr->sa_data))
231                 return -EADDRNOTAVAIL;
232
233         /* MAC addr configuration will be done in hardware for VFs
234          * by their corresponding PFs. Just copy to netdev addr here
235          */
236         if (!be_physfn(adapter))
237                 goto netdev_addr;
238
239         status = be_cmd_pmac_del(adapter, adapter->if_handle,
240                                 adapter->pmac_id, 0);
241         if (status)
242                 return status;
243
244         status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
245                                 adapter->if_handle, &adapter->pmac_id, 0);
246 netdev_addr:
247         if (!status)
248                 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
249
250         return status;
251 }
252
253 void netdev_stats_update(struct be_adapter *adapter)
254 {
255         struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
256         struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
257         struct be_port_rxf_stats *port_stats =
258                         &rxf_stats->port[adapter->port_num];
259         struct net_device_stats *dev_stats = &adapter->netdev->stats;
260         struct be_erx_stats *erx_stats = &hw_stats->erx;
261         struct be_rx_obj *rxo;
262         int i;
263
264         memset(dev_stats, 0, sizeof(*dev_stats));
265         for_all_rx_queues(adapter, rxo, i) {
266                 dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
267                 dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
268                 dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
269                 /*  no space in linux buffers: best possible approximation */
270                 dev_stats->rx_dropped +=
271                         erx_stats->rx_drops_no_fragments[rxo->q.id];
272         }
273
274         dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
275         dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;
276
277         /* bad pkts received */
278         dev_stats->rx_errors = port_stats->rx_crc_errors +
279                 port_stats->rx_alignment_symbol_errors +
280                 port_stats->rx_in_range_errors +
281                 port_stats->rx_out_range_errors +
282                 port_stats->rx_frame_too_long +
283                 port_stats->rx_dropped_too_small +
284                 port_stats->rx_dropped_too_short +
285                 port_stats->rx_dropped_header_too_small +
286                 port_stats->rx_dropped_tcp_length +
287                 port_stats->rx_dropped_runt +
288                 port_stats->rx_tcp_checksum_errs +
289                 port_stats->rx_ip_checksum_errs +
290                 port_stats->rx_udp_checksum_errs;
291
292         /* detailed rx errors */
293         dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
294                 port_stats->rx_out_range_errors +
295                 port_stats->rx_frame_too_long;
296
297         dev_stats->rx_crc_errors = port_stats->rx_crc_errors;
298
299         /* frame alignment errors */
300         dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;
301
302         /* receiver fifo overrun */
303         /* drops_no_pbuf is not per i/f; it's per BE card */
304         dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
305                                         port_stats->rx_input_fifo_overflow +
306                                         rxf_stats->rx_drops_no_pbuf;
307 }
308
309 void be_link_status_update(struct be_adapter *adapter, bool link_up)
310 {
311         struct net_device *netdev = adapter->netdev;
312
313         /* If link came up or went down */
314         if (adapter->link_up != link_up) {
315                 adapter->link_speed = -1;
316                 if (link_up) {
317                         netif_carrier_on(netdev);
318                         printk(KERN_INFO "%s: Link up\n", netdev->name);
319                 } else {
320                         netif_carrier_off(netdev);
321                         printk(KERN_INFO "%s: Link down\n", netdev->name);
322                 }
323                 adapter->link_up = link_up;
324         }
325 }
326
327 /* Update the EQ delay in BE based on the RX frags consumed per second */
328 static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
329 {
330         struct be_eq_obj *rx_eq = &rxo->rx_eq;
331         struct be_rx_stats *stats = &rxo->stats;
332         ulong now = jiffies;
333         u32 eqd;
334
335         if (!rx_eq->enable_aic)
336                 return;
337
338         /* Wrapped around */
339         if (time_before(now, stats->rx_fps_jiffies)) {
340                 stats->rx_fps_jiffies = now;
341                 return;
342         }
343
344         /* Update once a second */
345         if ((now - stats->rx_fps_jiffies) < HZ)
346                 return;
347
348         stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
349                         ((now - stats->rx_fps_jiffies) / HZ);
350
351         stats->rx_fps_jiffies = now;
352         stats->prev_rx_frags = stats->rx_frags;
353         eqd = stats->rx_fps / 110000;
354         eqd = eqd << 3;
355         if (eqd > rx_eq->max_eqd)
356                 eqd = rx_eq->max_eqd;
357         if (eqd < rx_eq->min_eqd)
358                 eqd = rx_eq->min_eqd;
359         if (eqd < 10)
360                 eqd = 0;
361         if (eqd != rx_eq->cur_eqd)
362                 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
363
364         rx_eq->cur_eqd = eqd;
365 }
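
/* Worked example of the heuristic above: at 550,000 rx frags/sec,
 * eqd = (550000 / 110000) << 3 = 40, then clamped to [min_eqd, max_eqd].
 * At 110,000 frags/sec the result is 8, which is below 10, so interrupt
 * delay is turned off entirely at low traffic rates.
 */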
366
367 static u32 be_calc_rate(u64 bytes, unsigned long ticks)
368 {
369         u64 rate = bytes;
370
371         do_div(rate, ticks / HZ);
372         rate <<= 3;                     /* bytes/sec -> bits/sec */
373         do_div(rate, 1000000ul);        /* Mbits/sec */
374
375         return rate;
376 }
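
/* Example: 500,000,000 bytes counted over 2*HZ ticks yields
 * 500000000 / 2 = 250,000,000 bytes/sec, << 3 = 2,000,000,000 bits/sec,
 * / 1,000,000 = 2000 Mbits/sec.
 */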
377
378 static void be_tx_rate_update(struct be_adapter *adapter)
379 {
380         struct be_tx_stats *stats = tx_stats(adapter);
381         ulong now = jiffies;
382
383         /* Wrapped around? */
384         if (time_before(now, stats->be_tx_jiffies)) {
385                 stats->be_tx_jiffies = now;
386                 return;
387         }
388
389         /* Update tx rate once in two seconds */
390         if ((now - stats->be_tx_jiffies) > 2 * HZ) {
391                 stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
392                                                   - stats->be_tx_bytes_prev,
393                                                  now - stats->be_tx_jiffies);
394                 stats->be_tx_jiffies = now;
395                 stats->be_tx_bytes_prev = stats->be_tx_bytes;
396         }
397 }
398
399 static void be_tx_stats_update(struct be_adapter *adapter,
400                         u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
401 {
402         struct be_tx_stats *stats = tx_stats(adapter);
403         stats->be_tx_reqs++;
404         stats->be_tx_wrbs += wrb_cnt;
405         stats->be_tx_bytes += copied;
406         stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
407         if (stopped)
408                 stats->be_tx_stops++;
409 }
410
411 /* Determine number of WRB entries needed to xmit data in an skb */
412 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
413                                                                 bool *dummy)
414 {
415         int cnt = (skb->len > skb->data_len);
416
417         cnt += skb_shinfo(skb)->nr_frags;
418
419         /* to account for hdr wrb */
420         cnt++;
421         if (lancer_chip(adapter) || !(cnt & 1)) {
422                 *dummy = false;
423         } else {
424                 /* add a dummy to make it an even num */
425                 cnt++;
426                 *dummy = true;
427         }
428         BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
429         return cnt;
430 }
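
/* Example: an skb with linear data and two page frags needs 1 + 2 data WRBs
 * plus the header WRB = 4, already even, so no dummy is added. With linear
 * data and a single page frag the count would be 3, so a dummy WRB pads it
 * to 4; Lancer chips have no even-count requirement.
 */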
431
432 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
433 {
434         wrb->frag_pa_hi = upper_32_bits(addr);
435         wrb->frag_pa_lo = addr & 0xFFFFFFFF;
436         wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
437 }
438
439 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
440                 struct sk_buff *skb, u32 wrb_cnt, u32 len)
441 {
442         u8 vlan_prio = 0;
443         u16 vlan_tag = 0;
444
445         memset(hdr, 0, sizeof(*hdr));
446
447         AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
448
449         if (skb_is_gso(skb)) {
450                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
451                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
452                         hdr, skb_shinfo(skb)->gso_size);
453                 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
454                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
455                 if (lancer_chip(adapter) && adapter->sli_family  ==
456                                                         LANCER_A0_SLI_FAMILY) {
457                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
458                         if (is_tcp_pkt(skb))
459                                 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
460                                                                 tcpcs, hdr, 1);
461                         else if (is_udp_pkt(skb))
462                                 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
463                                                                 udpcs, hdr, 1);
464                 }
465         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
466                 if (is_tcp_pkt(skb))
467                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
468                 else if (is_udp_pkt(skb))
469                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
470         }
471
472         if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
473                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
474                 vlan_tag = vlan_tx_tag_get(skb);
475                 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
476                 /* If vlan priority provided by OS is NOT in available bmap */
477                 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
478                         vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
479                                         adapter->recommended_prio;
480                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
481         }
482
483         AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
484         AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
485         AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
486         AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
487 }
488
489 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
490                 bool unmap_single)
491 {
492         dma_addr_t dma;
493
494         be_dws_le_to_cpu(wrb, sizeof(*wrb));
495
496         dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
497         if (wrb->frag_len) {
498                 if (unmap_single)
499                         dma_unmap_single(dev, dma, wrb->frag_len,
500                                          DMA_TO_DEVICE);
501                 else
502                         dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
503         }
504 }
505
506 static int make_tx_wrbs(struct be_adapter *adapter,
507                 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
508 {
509         dma_addr_t busaddr;
510         int i, copied = 0;
511         struct device *dev = &adapter->pdev->dev;
512         struct sk_buff *first_skb = skb;
513         struct be_queue_info *txq = &adapter->tx_obj.q;
514         struct be_eth_wrb *wrb;
515         struct be_eth_hdr_wrb *hdr;
516         bool map_single = false;
517         u16 map_head;
518
519         hdr = queue_head_node(txq);
520         queue_head_inc(txq);
521         map_head = txq->head;
522
523         if (skb->len > skb->data_len) {
524                 int len = skb_headlen(skb);
525                 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
526                 if (dma_mapping_error(dev, busaddr))
527                         goto dma_err;
528                 map_single = true;
529                 wrb = queue_head_node(txq);
530                 wrb_fill(wrb, busaddr, len);
531                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
532                 queue_head_inc(txq);
533                 copied += len;
534         }
535
536         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
537                 struct skb_frag_struct *frag =
538                         &skb_shinfo(skb)->frags[i];
539                 busaddr = dma_map_page(dev, frag->page, frag->page_offset,
540                                        frag->size, DMA_TO_DEVICE);
541                 if (dma_mapping_error(dev, busaddr))
542                         goto dma_err;
543                 wrb = queue_head_node(txq);
544                 wrb_fill(wrb, busaddr, frag->size);
545                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
546                 queue_head_inc(txq);
547                 copied += frag->size;
548         }
549
550         if (dummy_wrb) {
551                 wrb = queue_head_node(txq);
552                 wrb_fill(wrb, 0, 0);
553                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
554                 queue_head_inc(txq);
555         }
556
557         wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
558         be_dws_cpu_to_le(hdr, sizeof(*hdr));
559
560         return copied;
561 dma_err:
562         txq->head = map_head;
563         while (copied) {
564                 wrb = queue_head_node(txq);
565                 unmap_tx_frag(dev, wrb, map_single);
566                 map_single = false;
567                 copied -= wrb->frag_len;
568                 queue_head_inc(txq);
569         }
570         return 0;
571 }
572
573 static netdev_tx_t be_xmit(struct sk_buff *skb,
574                         struct net_device *netdev)
575 {
576         struct be_adapter *adapter = netdev_priv(netdev);
577         struct be_tx_obj *tx_obj = &adapter->tx_obj;
578         struct be_queue_info *txq = &tx_obj->q;
579         u32 wrb_cnt = 0, copied = 0;
580         u32 start = txq->head;
581         bool dummy_wrb, stopped = false;
582
583         wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
584
585         copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
586         if (copied) {
587                 /* record the sent skb in the sent_skb table */
588                 BUG_ON(tx_obj->sent_skb_list[start]);
589                 tx_obj->sent_skb_list[start] = skb;
590
591                 /* Ensure txq has space for the next skb; Else stop the queue
592                  * *BEFORE* ringing the tx doorbell, so that we serialize the
593                  * tx compls of the current transmit which'll wake up the queue
594                  */
595                 atomic_add(wrb_cnt, &txq->used);
596                 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
597                                                                 txq->len) {
598                         netif_stop_queue(netdev);
599                         stopped = true;
600                 }
601
602                 be_txq_notify(adapter, txq->id, wrb_cnt);
603
604                 be_tx_stats_update(adapter, wrb_cnt, copied,
605                                 skb_shinfo(skb)->gso_segs, stopped);
606         } else {
607                 txq->head = start;
608                 dev_kfree_skb_any(skb);
609         }
610         return NETDEV_TX_OK;
611 }
612
613 static int be_change_mtu(struct net_device *netdev, int new_mtu)
614 {
615         struct be_adapter *adapter = netdev_priv(netdev);
616         if (new_mtu < BE_MIN_MTU ||
617                         new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
618                                         (ETH_HLEN + ETH_FCS_LEN))) {
619                 dev_info(&adapter->pdev->dev,
620                         "MTU must be between %d and %d bytes\n",
621                         BE_MIN_MTU,
622                         (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
623                 return -EINVAL;
624         }
625         dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
626                         netdev->mtu, new_mtu);
627         netdev->mtu = new_mtu;
628         return 0;
629 }
630
631 /*
632  * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
633  * If the user configures more, place BE in vlan promiscuous mode.
634  */
635 static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
636 {
637         u16 vtag[BE_NUM_VLANS_SUPPORTED];
638         u16 ntags = 0, i;
639         int status = 0;
640         u32 if_handle;
641
642         if (vf) {
643                 if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
644                 vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
645                 status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
646         }
647
648         if (adapter->vlans_added <= adapter->max_vlans)  {
649                 /* Construct VLAN Table to give to HW */
650                 for (i = 0; i < VLAN_N_VID; i++) {
651                         if (adapter->vlan_tag[i]) {
652                                 vtag[ntags] = cpu_to_le16(i);
653                                 ntags++;
654                         }
655                 }
656                 status = be_cmd_vlan_config(adapter, adapter->if_handle,
657                                         vtag, ntags, 1, 0);
658         } else {
659                 status = be_cmd_vlan_config(adapter, adapter->if_handle,
660                                         NULL, 0, 1, 1);
661         }
662
663         return status;
664 }
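
/* Example: if VLAN ids 5 and 100 have been added, the loop above builds
 * vtag = { cpu_to_le16(5), cpu_to_le16(100) }, ntags = 2, and hands the
 * table to the hardware; past max_vlans the interface falls back to vlan
 * promiscuous mode instead.
 */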
665
666 static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
667 {
668         struct be_adapter *adapter = netdev_priv(netdev);
669
670         adapter->vlan_grp = grp;
671 }
672
673 static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
674 {
675         struct be_adapter *adapter = netdev_priv(netdev);
676
677         adapter->vlans_added++;
678         if (!be_physfn(adapter))
679                 return;
680
681         adapter->vlan_tag[vid] = 1;
682         if (adapter->vlans_added <= (adapter->max_vlans + 1))
683                 be_vid_config(adapter, false, 0);
684 }
685
686 static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
687 {
688         struct be_adapter *adapter = netdev_priv(netdev);
689
690         adapter->vlans_added--;
691         vlan_group_set_device(adapter->vlan_grp, vid, NULL);
692
693         if (!be_physfn(adapter))
694                 return;
695
696         adapter->vlan_tag[vid] = 0;
697         if (adapter->vlans_added <= adapter->max_vlans)
698                 be_vid_config(adapter, false, 0);
699 }
700
701 static void be_set_multicast_list(struct net_device *netdev)
702 {
703         struct be_adapter *adapter = netdev_priv(netdev);
704
705         if (netdev->flags & IFF_PROMISC) {
706                 be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
707                 adapter->promiscuous = true;
708                 goto done;
709         }
710
711         /* BE was previously in promiscuous mode; disable it */
712         if (adapter->promiscuous) {
713                 adapter->promiscuous = false;
714                 be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
715         }
716
717         /* Enable multicast promisc if num configured exceeds what we support */
718         if (netdev->flags & IFF_ALLMULTI ||
719             netdev_mc_count(netdev) > BE_MAX_MC) {
720                 be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
721                                 &adapter->mc_cmd_mem);
722                 goto done;
723         }
724
725         be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
726                 &adapter->mc_cmd_mem);
727 done:
728         return;
729 }
730
731 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
732 {
733         struct be_adapter *adapter = netdev_priv(netdev);
734         int status;
735
736         if (!adapter->sriov_enabled)
737                 return -EPERM;
738
739         if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
740                 return -EINVAL;
741
742         if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
743                 status = be_cmd_pmac_del(adapter,
744                                         adapter->vf_cfg[vf].vf_if_handle,
745                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
746
747         status = be_cmd_pmac_add(adapter, mac,
748                                 adapter->vf_cfg[vf].vf_if_handle,
749                                 &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
750
751         if (status)
752                 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
753                                 mac, vf);
754         else
755                 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
756
757         return status;
758 }
759
760 static int be_get_vf_config(struct net_device *netdev, int vf,
761                         struct ifla_vf_info *vi)
762 {
763         struct be_adapter *adapter = netdev_priv(netdev);
764
765         if (!adapter->sriov_enabled)
766                 return -EPERM;
767
768         if (vf >= num_vfs)
769                 return -EINVAL;
770
771         vi->vf = vf;
772         vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
773         vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
774         vi->qos = 0;
775         memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
776
777         return 0;
778 }
779
780 static int be_set_vf_vlan(struct net_device *netdev,
781                         int vf, u16 vlan, u8 qos)
782 {
783         struct be_adapter *adapter = netdev_priv(netdev);
784         int status = 0;
785
786         if (!adapter->sriov_enabled)
787                 return -EPERM;
788
789         if ((vf >= num_vfs) || (vlan > 4095))
790                 return -EINVAL;
791
792         if (vlan) {
793                 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
794                 adapter->vlans_added++;
795         } else {
796                 adapter->vf_cfg[vf].vf_vlan_tag = 0;
797                 adapter->vlans_added--;
798         }
799
800         status = be_vid_config(adapter, true, vf);
801
802         if (status)
803                 dev_info(&adapter->pdev->dev,
804                                 "VLAN %d config on VF %d failed\n", vlan, vf);
805         return status;
806 }
807
808 static int be_set_vf_tx_rate(struct net_device *netdev,
809                         int vf, int rate)
810 {
811         struct be_adapter *adapter = netdev_priv(netdev);
812         int status = 0;
813
814         if (!adapter->sriov_enabled)
815                 return -EPERM;
816
817         if ((vf >= num_vfs) || (rate < 0))
818                 return -EINVAL;
819
820         if (rate > 10000)
821                 rate = 10000;
822
823         adapter->vf_cfg[vf].vf_tx_rate = rate;
824         status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
825
826         if (status)
827                 dev_info(&adapter->pdev->dev,
828                                 "tx rate %d on VF %d failed\n", rate, vf);
829         return status;
830 }
831
832 static void be_rx_rate_update(struct be_rx_obj *rxo)
833 {
834         struct be_rx_stats *stats = &rxo->stats;
835         ulong now = jiffies;
836
837         /* Wrapped around */
838         if (time_before(now, stats->rx_jiffies)) {
839                 stats->rx_jiffies = now;
840                 return;
841         }
842
843         /* Update the rate once in two seconds */
844         if ((now - stats->rx_jiffies) < 2 * HZ)
845                 return;
846
847         stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
848                                 now - stats->rx_jiffies);
849         stats->rx_jiffies = now;
850         stats->rx_bytes_prev = stats->rx_bytes;
851 }
852
853 static void be_rx_stats_update(struct be_rx_obj *rxo,
854                 u32 pktsize, u16 numfrags, u8 pkt_type)
855 {
856         struct be_rx_stats *stats = &rxo->stats;
857
858         stats->rx_compl++;
859         stats->rx_frags += numfrags;
860         stats->rx_bytes += pktsize;
861         stats->rx_pkts++;
862         if (pkt_type == BE_MULTICAST_PACKET)
863                 stats->rx_mcast_pkts++;
864 }
865
866 static inline bool csum_passed(struct be_eth_rx_compl *rxcp)
867 {
868         u8 l4_cksm, ipv6, ipcksm;
869
870         l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
871         ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
872         ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
873
874         /* Ignore ipcksm for ipv6 pkts */
875         return l4_cksm && (ipcksm || ipv6);
876 }
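
/* Truth table for the check above: an IPv4 packet passes only if both the
 * L4 and IP checksums are good (l4_cksm && ipcksm); an IPv6 packet has no
 * IP header checksum, so only l4_cksm matters.
 */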
877
878 static struct be_rx_page_info *
879 get_rx_page_info(struct be_adapter *adapter,
880                 struct be_rx_obj *rxo,
881                 u16 frag_idx)
882 {
883         struct be_rx_page_info *rx_page_info;
884         struct be_queue_info *rxq = &rxo->q;
885
886         rx_page_info = &rxo->page_info_tbl[frag_idx];
887         BUG_ON(!rx_page_info->page);
888
889         if (rx_page_info->last_page_user) {
890                 dma_unmap_page(&adapter->pdev->dev,
891                                dma_unmap_addr(rx_page_info, bus),
892                                adapter->big_page_size, DMA_FROM_DEVICE);
893                 rx_page_info->last_page_user = false;
894         }
895
896         atomic_dec(&rxq->used);
897         return rx_page_info;
898 }
899
900 /* Throw away the data in the Rx completion */
901 static void be_rx_compl_discard(struct be_adapter *adapter,
902                 struct be_rx_obj *rxo,
903                 struct be_eth_rx_compl *rxcp)
904 {
905         struct be_queue_info *rxq = &rxo->q;
906         struct be_rx_page_info *page_info;
907         u16 rxq_idx, i, num_rcvd;
908
909         rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
910         num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
911
912         /* Skip out-of-buffer compl (lancer) or flush compl (BE) */
913         if (likely(rxq_idx != rxo->last_frag_index && num_rcvd != 0)) {
914
915                 rxo->last_frag_index = rxq_idx;
916
917                 for (i = 0; i < num_rcvd; i++) {
918                         page_info = get_rx_page_info(adapter, rxo, rxq_idx);
919                         put_page(page_info->page);
920                         memset(page_info, 0, sizeof(*page_info));
921                         index_inc(&rxq_idx, rxq->len);
922                 }
923         }
924 }
925
926 /*
927  * skb_fill_rx_data forms a complete skb for an ether frame
928  * indicated by rxcp.
929  */
930 static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
931                         struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
932                         u16 num_rcvd)
933 {
934         struct be_queue_info *rxq = &rxo->q;
935         struct be_rx_page_info *page_info;
936         u16 rxq_idx, i, j;
937         u32 pktsize, hdr_len, curr_frag_len, size;
938         u8 *start;
939         u8 pkt_type;
940
941         rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
942         pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
943         pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
944
945         page_info = get_rx_page_info(adapter, rxo, rxq_idx);
946
947         start = page_address(page_info->page) + page_info->page_offset;
948         prefetch(start);
949
950         /* Copy data in the first descriptor of this completion */
951         curr_frag_len = min(pktsize, rx_frag_size);
952
953         /* Copy the header portion into skb_data */
954         hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
955         memcpy(skb->data, start, hdr_len);
956         skb->len = curr_frag_len;
957         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
958                 /* Complete packet has now been moved to data */
959                 put_page(page_info->page);
960                 skb->data_len = 0;
961                 skb->tail += curr_frag_len;
962         } else {
963                 skb_shinfo(skb)->nr_frags = 1;
964                 skb_shinfo(skb)->frags[0].page = page_info->page;
965                 skb_shinfo(skb)->frags[0].page_offset =
966                                         page_info->page_offset + hdr_len;
967                 skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
968                 skb->data_len = curr_frag_len - hdr_len;
969                 skb->tail += hdr_len;
970         }
971         page_info->page = NULL;
972
973         if (pktsize <= rx_frag_size) {
974                 BUG_ON(num_rcvd != 1);
975                 goto done;
976         }
977
978         /* More frags present for this completion */
979         size = pktsize;
980         for (i = 1, j = 0; i < num_rcvd; i++) {
981                 size -= curr_frag_len;
982                 index_inc(&rxq_idx, rxq->len);
983                 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
984
985                 curr_frag_len = min(size, rx_frag_size);
986
987                 /* Coalesce all frags from the same physical page in one slot */
988                 if (page_info->page_offset == 0) {
989                         /* Fresh page */
990                         j++;
991                         skb_shinfo(skb)->frags[j].page = page_info->page;
992                         skb_shinfo(skb)->frags[j].page_offset =
993                                                         page_info->page_offset;
994                         skb_shinfo(skb)->frags[j].size = 0;
995                         skb_shinfo(skb)->nr_frags++;
996                 } else {
997                         put_page(page_info->page);
998                 }
999
1000                 skb_shinfo(skb)->frags[j].size += curr_frag_len;
1001                 skb->len += curr_frag_len;
1002                 skb->data_len += curr_frag_len;
1003
1004                 page_info->page = NULL;
1005         }
1006         BUG_ON(j > MAX_SKB_FRAGS);
1007
1008 done:
1009         be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type);
1010 }
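
/* Example: a 3000-byte frame with the default rx_frag_size of 2048 arrives
 * as two fragments of 2048 and 952 bytes. The first BE_HDR_LEN bytes are
 * copied into the skb's linear area and the remainder is attached as page
 * frags, with consecutive frags from the same physical page coalesced into
 * one slot.
 */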
1011
1012 /* Process the RX completion indicated by rxcp when GRO is disabled */
1013 static void be_rx_compl_process(struct be_adapter *adapter,
1014                         struct be_rx_obj *rxo,
1015                         struct be_eth_rx_compl *rxcp)
1016 {
1017         struct sk_buff *skb;
1018         u32 vlanf, vid;
1019         u16 num_rcvd;
1020         u8 vtm;
1021
1022         num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
1023
1024         skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
1025         if (unlikely(!skb)) {
1026                 if (net_ratelimit())
1027                         dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
1028                 be_rx_compl_discard(adapter, rxo, rxcp);
1029                 return;
1030         }
1031
1032         skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);
1033
1034         if (likely(adapter->rx_csum && csum_passed(rxcp)))
1035                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1036         else
1037                 skb_checksum_none_assert(skb);
1038
1039         skb->truesize = skb->len + sizeof(struct sk_buff);
1040         skb->protocol = eth_type_trans(skb, adapter->netdev);
1041
1042         vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
1043         vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
1044
1045         /* vlanf could be wrongly set in some cards.
1046          * ignore if vtm is not set */
1047         if ((adapter->function_mode & 0x400) && !vtm)
1048                 vlanf = 0;
1049
1050         if (unlikely(vlanf)) {
1051                 if (!adapter->vlan_grp || adapter->vlans_added == 0) {
1052                         kfree_skb(skb);
1053                         return;
1054                 }
1055                 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
1056                 if (!lancer_chip(adapter))
1057                         vid = swab16(vid);
1058                 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
1059         } else {
1060                 netif_receive_skb(skb);
1061         }
1062 }
1063
1064 /* Process the RX completion indicated by rxcp when GRO is enabled */
1065 static void be_rx_compl_process_gro(struct be_adapter *adapter,
1066                 struct be_rx_obj *rxo,
1067                 struct be_eth_rx_compl *rxcp)
1068 {
1069         struct be_rx_page_info *page_info;
1070         struct sk_buff *skb = NULL;
1071         struct be_queue_info *rxq = &rxo->q;
1072         struct be_eq_obj *eq_obj =  &rxo->rx_eq;
1073         u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
1074         u16 i, rxq_idx = 0, vid, j;
1075         u8 vtm;
1076         u8 pkt_type;
1077
1078         num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
1079         pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
1080         vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
1081         rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
1082         vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
1083         pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
1084
1085         /* vlanf could be wrongly set in some cards.
1086          * ignore if vtm is not set */
1087         if ((adapter->function_mode & 0x400) && !vtm)
1088                 vlanf = 0;
1089
1090         skb = napi_get_frags(&eq_obj->napi);
1091         if (!skb) {
1092                 be_rx_compl_discard(adapter, rxo, rxcp);
1093                 return;
1094         }
1095
1096         remaining = pkt_size;
1097         for (i = 0, j = -1; i < num_rcvd; i++) {
1098                 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
1099
1100                 curr_frag_len = min(remaining, rx_frag_size);
1101
1102                 /* Coalesce all frags from the same physical page in one slot */
1103                 if (i == 0 || page_info->page_offset == 0) {
1104                         /* First frag or Fresh page */
1105                         j++;
1106                         skb_shinfo(skb)->frags[j].page = page_info->page;
1107                         skb_shinfo(skb)->frags[j].page_offset =
1108                                                         page_info->page_offset;
1109                         skb_shinfo(skb)->frags[j].size = 0;
1110                 } else {
1111                         put_page(page_info->page);
1112                 }
1113                 skb_shinfo(skb)->frags[j].size += curr_frag_len;
1114
1115                 remaining -= curr_frag_len;
1116                 index_inc(&rxq_idx, rxq->len);
1117                 memset(page_info, 0, sizeof(*page_info));
1118         }
1119         BUG_ON(j > MAX_SKB_FRAGS);
1120
1121         skb_shinfo(skb)->nr_frags = j + 1;
1122         skb->len = pkt_size;
1123         skb->data_len = pkt_size;
1124         skb->truesize += pkt_size;
1125         skb->ip_summed = CHECKSUM_UNNECESSARY;
1126
1127         if (likely(!vlanf)) {
1128                 napi_gro_frags(&eq_obj->napi);
1129         } else {
1130                 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
1131                 if (!lancer_chip(adapter))
1132                         vid = swab16(vid);
1133
1134                 if (!adapter->vlan_grp || adapter->vlans_added == 0)
1135                         return;
1136
1137                 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
1138         }
1139
1140         be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type);
1141 }
1142
1143 static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo)
1144 {
1145         struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq);
1146
1147         if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
1148                 return NULL;
1149
1150         rmb();
1151         be_dws_le_to_cpu(rxcp, sizeof(*rxcp));
1152
1153         queue_tail_inc(&rxo->cq);
1154         return rxcp;
1155 }
1156
1157 /* To reset the valid bit, we need to reset the whole word as
1158  * when walking the queue the valid entries are little-endian
1159  * and invalid entries are host endian
1160  */
1161 static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
1162 {
1163         rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
1164 }
1165
1166 static inline struct page *be_alloc_pages(u32 size)
1167 {
1168         gfp_t alloc_flags = GFP_ATOMIC;
1169         u32 order = get_order(size);
1170         if (order > 0)
1171                 alloc_flags |= __GFP_COMP;
1172         return  alloc_pages(alloc_flags, order);
1173 }
1174
1175 /*
1176  * Allocate a page, split it to fragments of size rx_frag_size and post as
1177  * receive buffers to BE
1178  */
1179 static void be_post_rx_frags(struct be_rx_obj *rxo)
1180 {
1181         struct be_adapter *adapter = rxo->adapter;
1182         struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1183         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1184         struct be_queue_info *rxq = &rxo->q;
1185         struct page *pagep = NULL;
1186         struct be_eth_rx_d *rxd;
1187         u64 page_dmaaddr = 0, frag_dmaaddr;
1188         u32 posted, page_offset = 0;
1189
1190         page_info = &rxo->page_info_tbl[rxq->head];
1191         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1192                 if (!pagep) {
1193                         pagep = be_alloc_pages(adapter->big_page_size);
1194                         if (unlikely(!pagep)) {
1195                                 rxo->stats.rx_post_fail++;
1196                                 break;
1197                         }
1198                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1199                                                     0, adapter->big_page_size,
1200                                                     DMA_FROM_DEVICE);
1201                         page_info->page_offset = 0;
1202                 } else {
1203                         get_page(pagep);
1204                         page_info->page_offset = page_offset + rx_frag_size;
1205                 }
1206                 page_offset = page_info->page_offset;
1207                 page_info->page = pagep;
1208                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1209                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1210
1211                 rxd = queue_head_node(rxq);
1212                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1213                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1214
1215                 /* Any space left in the current big page for another frag? */
1216                 if ((page_offset + rx_frag_size + rx_frag_size) >
1217                                         adapter->big_page_size) {
1218                         pagep = NULL;
1219                         page_info->last_page_user = true;
1220                 }
1221
1222                 prev_page_info = page_info;
1223                 queue_head_inc(rxq);
1224                 page_info = &page_info_tbl[rxq->head];
1225         }
1226         if (pagep)
1227                 prev_page_info->last_page_user = true;
1228
1229         if (posted) {
1230                 atomic_add(posted, &rxq->used);
1231                 be_rxq_notify(adapter, rxq->id, posted);
1232         } else if (atomic_read(&rxq->used) == 0) {
1233                 /* Let be_worker replenish when memory is available */
1234                 rxo->rx_post_starved = true;
1235         }
1236 }
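
/* Example: with 4KB pages and rx_frag_size = 2048, big_page_size is 4096,
 * so each page yields two receive fragments; get_page() takes an extra
 * reference for the second frag and last_page_user is set on it so the
 * page is dma-unmapped exactly once, when its final frag is consumed.
 */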
1237
1238 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1239 {
1240         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1241
1242         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1243                 return NULL;
1244
1245         rmb();
1246         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1247
1248         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1249
1250         queue_tail_inc(tx_cq);
1251         return txcp;
1252 }
1253
1254 static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
1255 {
1256         struct be_queue_info *txq = &adapter->tx_obj.q;
1257         struct be_eth_wrb *wrb;
1258         struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1259         struct sk_buff *sent_skb;
1260         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1261         bool unmap_skb_hdr = true;
1262
1263         sent_skb = sent_skbs[txq->tail];
1264         BUG_ON(!sent_skb);
1265         sent_skbs[txq->tail] = NULL;
1266
1267         /* skip header wrb */
1268         queue_tail_inc(txq);
1269
1270         do {
1271                 cur_index = txq->tail;
1272                 wrb = queue_tail_node(txq);
1273                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1274                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1275                 unmap_skb_hdr = false;
1276
1277                 num_wrbs++;
1278                 queue_tail_inc(txq);
1279         } while (cur_index != last_index);
1280
1281         atomic_sub(num_wrbs, &txq->used);
1282
1283         kfree_skb(sent_skb);
1284 }
1285
1286 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1287 {
1288         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1289
1290         if (!eqe->evt)
1291                 return NULL;
1292
1293         rmb();
1294         eqe->evt = le32_to_cpu(eqe->evt);
1295         queue_tail_inc(&eq_obj->q);
1296         return eqe;
1297 }
1298
1299 static int event_handle(struct be_adapter *adapter,
1300                         struct be_eq_obj *eq_obj)
1301 {
1302         struct be_eq_entry *eqe;
1303         u16 num = 0;
1304
1305         while ((eqe = event_get(eq_obj)) != NULL) {
1306                 eqe->evt = 0;
1307                 num++;
1308         }
1309
1310         /* Deal with any spurious interrupts that come
1311          * without events
1312          */
1313         be_eq_notify(adapter, eq_obj->q.id, true, true, num);
1314         if (num)
1315                 napi_schedule(&eq_obj->napi);
1316
1317         return num;
1318 }
1319
1320 /* Just read and notify events without processing them.
1321  * Used at the time of destroying event queues */
1322 static void be_eq_clean(struct be_adapter *adapter,
1323                         struct be_eq_obj *eq_obj)
1324 {
1325         struct be_eq_entry *eqe;
1326         u16 num = 0;
1327
1328         while ((eqe = event_get(eq_obj)) != NULL) {
1329                 eqe->evt = 0;
1330                 num++;
1331         }
1332
1333         if (num)
1334                 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1335 }
1336
1337 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1338 {
1339         struct be_rx_page_info *page_info;
1340         struct be_queue_info *rxq = &rxo->q;
1341         struct be_queue_info *rx_cq = &rxo->cq;
1342         struct be_eth_rx_compl *rxcp;
1343         u16 tail;
1344
1345         /* First cleanup pending rx completions */
1346         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1347                 be_rx_compl_discard(adapter, rxo, rxcp);
1348                 be_rx_compl_reset(rxcp);
1349                 be_cq_notify(adapter, rx_cq->id, false, 1);
1350         }
1351
1352         /* Then free posted rx buffers that were not used */
1353         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1354         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1355                 page_info = get_rx_page_info(adapter, rxo, tail);
1356                 put_page(page_info->page);
1357                 memset(page_info, 0, sizeof(*page_info));
1358         }
1359         BUG_ON(atomic_read(&rxq->used));
1360 }
1361
1362 static void be_tx_compl_clean(struct be_adapter *adapter)
1363 {
1364         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1365         struct be_queue_info *txq = &adapter->tx_obj.q;
1366         struct be_eth_tx_compl *txcp;
1367         u16 end_idx, cmpl = 0, timeo = 0;
1368         struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1369         struct sk_buff *sent_skb;
1370         bool dummy_wrb;
1371
1372         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1373         do {
1374                 while ((txcp = be_tx_compl_get(tx_cq))) {
1375                         end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1376                                         wrb_index, txcp);
1377                         be_tx_compl_process(adapter, end_idx);
1378                         cmpl++;
1379                 }
1380                 if (cmpl) {
1381                         be_cq_notify(adapter, tx_cq->id, false, cmpl);
1382                         cmpl = 0;
1383                 }
1384
1385                 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1386                         break;
1387
1388                 mdelay(1);
1389         } while (true);
1390
1391         if (atomic_read(&txq->used))
1392                 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1393                         atomic_read(&txq->used));
1394
1395         /* free posted tx for which compls will never arrive */
1396         while (atomic_read(&txq->used)) {
1397                 sent_skb = sent_skbs[txq->tail];
1398                 end_idx = txq->tail;
1399                 index_adv(&end_idx,
1400                         wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1401                         txq->len);
1402                 be_tx_compl_process(adapter, end_idx);
1403         }
1404 }
1405
1406 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1407 {
1408         struct be_queue_info *q;
1409
1410         q = &adapter->mcc_obj.q;
1411         if (q->created)
1412                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1413         be_queue_free(adapter, q);
1414
1415         q = &adapter->mcc_obj.cq;
1416         if (q->created)
1417                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1418         be_queue_free(adapter, q);
1419 }
1420
1421 /* Must be called only after TX qs are created as MCC shares TX EQ */
1422 static int be_mcc_queues_create(struct be_adapter *adapter)
1423 {
1424         struct be_queue_info *q, *cq;
1425
1426         /* Alloc MCC compl queue */
1427         cq = &adapter->mcc_obj.cq;
1428         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1429                         sizeof(struct be_mcc_compl)))
1430                 goto err;
1431
1432         /* Ask BE to create MCC compl queue; share TX's eq */
1433         if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1434                 goto mcc_cq_free;
1435
1436         /* Alloc MCC queue */
1437         q = &adapter->mcc_obj.q;
1438         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1439                 goto mcc_cq_destroy;
1440
1441         /* Ask BE to create MCC queue */
1442         if (be_cmd_mccq_create(adapter, q, cq))
1443                 goto mcc_q_free;
1444
1445         return 0;
1446
1447 mcc_q_free:
1448         be_queue_free(adapter, q);
1449 mcc_cq_destroy:
1450         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1451 mcc_cq_free:
1452         be_queue_free(adapter, cq);
1453 err:
1454         return -1;
1455 }
1456
1457 static void be_tx_queues_destroy(struct be_adapter *adapter)
1458 {
1459         struct be_queue_info *q;
1460
1461         q = &adapter->tx_obj.q;
1462         if (q->created)
1463                 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1464         be_queue_free(adapter, q);
1465
1466         q = &adapter->tx_obj.cq;
1467         if (q->created)
1468                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1469         be_queue_free(adapter, q);
1470
1471         /* Clear any residual events */
1472         be_eq_clean(adapter, &adapter->tx_eq);
1473
1474         q = &adapter->tx_eq.q;
1475         if (q->created)
1476                 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1477         be_queue_free(adapter, q);
1478 }
1479
1480 static int be_tx_queues_create(struct be_adapter *adapter)
1481 {
1482         struct be_queue_info *eq, *q, *cq;
1483
1484         adapter->tx_eq.max_eqd = 0;
1485         adapter->tx_eq.min_eqd = 0;
1486         adapter->tx_eq.cur_eqd = 96;
1487         adapter->tx_eq.enable_aic = false;
1488         /* Alloc Tx Event queue */
1489         eq = &adapter->tx_eq.q;
1490         if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
1491                 return -1;
1492
1493         /* Ask BE to create Tx Event queue */
1494         if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1495                 goto tx_eq_free;
1496
1497         adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
1498
1499
1500         /* Alloc TX eth compl queue */
1501         cq = &adapter->tx_obj.cq;
1502         if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1503                         sizeof(struct be_eth_tx_compl)))
1504                 goto tx_eq_destroy;
1505
1506         /* Ask BE to create Tx eth compl queue */
1507         if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1508                 goto tx_cq_free;
1509
1510         /* Alloc TX eth queue */
1511         q = &adapter->tx_obj.q;
1512         if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
1513                 goto tx_cq_destroy;
1514
1515         /* Ask BE to create Tx eth queue */
1516         if (be_cmd_txq_create(adapter, q, cq))
1517                 goto tx_q_free;
1518         return 0;
1519
1520 tx_q_free:
1521         be_queue_free(adapter, q);
1522 tx_cq_destroy:
1523         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1524 tx_cq_free:
1525         be_queue_free(adapter, cq);
1526 tx_eq_destroy:
1527         be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
1528 tx_eq_free:
1529         be_queue_free(adapter, eq);
1530         return -1;
1531 }
1532
1533 static void be_rx_queues_destroy(struct be_adapter *adapter)
1534 {
1535         struct be_queue_info *q;
1536         struct be_rx_obj *rxo;
1537         int i;
1538
1539         for_all_rx_queues(adapter, rxo, i) {
1540                 q = &rxo->q;
1541                 if (q->created) {
1542                         be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1543                         /* After the rxq is invalidated, wait for a grace time
1544                          * of 1ms for all DMA to end and the flush compl to
1545                          * arrive
1546                          */
1547                         mdelay(1);
1548                         be_rx_q_clean(adapter, rxo);
1549                 }
1550                 be_queue_free(adapter, q);
1551
1552                 q = &rxo->cq;
1553                 if (q->created)
1554                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1555                 be_queue_free(adapter, q);
1556
1557                 /* Clear any residual events */
1558                 q = &rxo->rx_eq.q;
1559                 if (q->created) {
1560                         be_eq_clean(adapter, &rxo->rx_eq);
1561                         be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1562                 }
1563                 be_queue_free(adapter, q);
1564         }
1565 }
1566
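/*
 * RX queue 0 is the default non-RSS queue; queues 1..num_rx_qs-1 are
 * created with RSS enabled (the rss_enable argument below) and their
 * rss_id values are collected into the indirection table handed to
 * be_cmd_rss_config().
 */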
1567 static int be_rx_queues_create(struct be_adapter *adapter)
1568 {
1569         struct be_queue_info *eq, *q, *cq;
1570         struct be_rx_obj *rxo;
1571         int rc, i;
1572
1573         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1574         for_all_rx_queues(adapter, rxo, i) {
1575                 rxo->adapter = adapter;
1576                 /* Init last_frag_index so that the frag index in the first
1577                  * completion will never match */
1578                 rxo->last_frag_index = 0xffff;
1579                 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1580                 rxo->rx_eq.enable_aic = true;
1581
1582                 /* EQ */
1583                 eq = &rxo->rx_eq.q;
1584                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1585                                         sizeof(struct be_eq_entry));
1586                 if (rc)
1587                         goto err;
1588
1589                 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1590                 if (rc)
1591                         goto err;
1592
1593                 rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
1594
1595                 /* CQ */
1596                 cq = &rxo->cq;
1597                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1598                                 sizeof(struct be_eth_rx_compl));
1599                 if (rc)
1600                         goto err;
1601
1602                 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1603                 if (rc)
1604                         goto err;
1605                 /* Rx Q */
1606                 q = &rxo->q;
1607                 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1608                                 sizeof(struct be_eth_rx_d));
1609                 if (rc)
1610                         goto err;
1611
1612                 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1613                         BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1614                         (i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
1615                 if (rc)
1616                         goto err;
1617         }
1618
1619         if (be_multi_rxq(adapter)) {
1620                 u8 rsstable[MAX_RSS_QS];
1621
1622                 for_all_rss_queues(adapter, rxo, i)
1623                         rsstable[i] = rxo->rss_id;
1624
1625                 rc = be_cmd_rss_config(adapter, rsstable,
1626                         adapter->num_rx_qs - 1);
1627                 if (rc)
1628                         goto err;
1629         }
1630
1631         return 0;
1632 err:
1633         be_rx_queues_destroy(adapter);
1634         return -1;
1635 }
1636
1637 static bool event_peek(struct be_eq_obj *eq_obj)
1638 {
1639         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1640         if (!eqe->evt)
1641                 return false;
1642         else
1643                 return true;
1644 }
1645
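/*
 * Legacy/INTx interrupt handler. On Lancer each EQ is peeked directly
 * for a pending entry; on BEx the CEV_ISR word is read and each EQ's
 * msix_vec_idx bit identifies which event queues fired.
 */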
1646 static irqreturn_t be_intx(int irq, void *dev)
1647 {
1648         struct be_adapter *adapter = dev;
1649         struct be_rx_obj *rxo;
1650         int isr, i, tx = 0, rx = 0;
1651
1652         if (lancer_chip(adapter)) {
1653                 if (event_peek(&adapter->tx_eq))
1654                         tx = event_handle(adapter, &adapter->tx_eq);
1655                 for_all_rx_queues(adapter, rxo, i) {
1656                         if (event_peek(&rxo->rx_eq))
1657                                 rx |= event_handle(adapter, &rxo->rx_eq);
1658                 }
1659
1660                 if (!(tx || rx))
1661                         return IRQ_NONE;
1662
1663         } else {
1664                 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1665                         (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1666                 if (!isr)
1667                         return IRQ_NONE;
1668
1669                 if ((1 << adapter->tx_eq.msix_vec_idx) & isr)
1670                         event_handle(adapter, &adapter->tx_eq);
1671
1672                 for_all_rx_queues(adapter, rxo, i) {
1673                         if ((1 << rxo->rx_eq.msix_vec_idx) & isr)
1674                                 event_handle(adapter, &rxo->rx_eq);
1675                 }
1676         }
1677
1678         return IRQ_HANDLED;
1679 }
1680
1681 static irqreturn_t be_msix_rx(int irq, void *dev)
1682 {
1683         struct be_rx_obj *rxo = dev;
1684         struct be_adapter *adapter = rxo->adapter;
1685
1686         event_handle(adapter, &rxo->rx_eq);
1687
1688         return IRQ_HANDLED;
1689 }
1690
1691 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1692 {
1693         struct be_adapter *adapter = dev;
1694
1695         event_handle(adapter, &adapter->tx_eq);
1696
1697         return IRQ_HANDLED;
1698 }
1699
1700 static inline bool do_gro(struct be_rx_obj *rxo,
1701                         struct be_eth_rx_compl *rxcp, u8 err)
1702 {
1703         int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
1704
1705         if (err)
1706                 rxo->stats.rxcp_err++;
1707
1708         return tcp_frame && !err;
1709 }
1710
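/*
 * NAPI RX poll. Standard NAPI contract: consume at most 'budget'
 * completions; if the CQ drains early, complete NAPI and re-arm the CQ
 * (rearm == true), otherwise only ack the processed entries so the poll
 * is rescheduled. A completion that repeats the previous fragndx, or
 * reports numfrags == 0, is a flush (BE) or out-of-buffer (Lancer)
 * completion and is skipped rather than processed.
 */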
1711 static int be_poll_rx(struct napi_struct *napi, int budget)
1712 {
1713         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1714         struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1715         struct be_adapter *adapter = rxo->adapter;
1716         struct be_queue_info *rx_cq = &rxo->cq;
1717         struct be_eth_rx_compl *rxcp;
1718         u32 work_done;
1719         u16 frag_index, num_rcvd;
1720         u8 err;
1721
1722         rxo->stats.rx_polls++;
1723         for (work_done = 0; work_done < budget; work_done++) {
1724                 rxcp = be_rx_compl_get(rxo);
1725                 if (!rxcp)
1726                         break;
1727
1728                 err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
1729                 frag_index = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx,
1730                                                                 rxcp);
1731                 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags,
1732                                                                 rxcp);
1733
1734                 /* Skip out-of-buffer compl (Lancer) or flush compl (BE) */
1735                 if (likely(frag_index != rxo->last_frag_index &&
1736                                 num_rcvd != 0)) {
1737                         rxo->last_frag_index = frag_index;
1738
1739                         if (do_gro(rxo, rxcp, err))
1740                                 be_rx_compl_process_gro(adapter, rxo, rxcp);
1741                         else
1742                                 be_rx_compl_process(adapter, rxo, rxcp);
1743                 }
1744
1745                 be_rx_compl_reset(rxcp);
1746         }
1747
1748         /* Refill the queue */
1749         if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1750                 be_post_rx_frags(rxo);
1751
1752         /* All consumed */
1753         if (work_done < budget) {
1754                 napi_complete(napi);
1755                 be_cq_notify(adapter, rx_cq->id, true, work_done);
1756         } else {
1757                 /* More to be consumed; continue with interrupts disabled */
1758                 be_cq_notify(adapter, rx_cq->id, false, work_done);
1759         }
1760         return work_done;
1761 }
1762
1763 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1764  * For TX/MCC we don't honour budget; consume everything
1765  */
1766 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1767 {
1768         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1769         struct be_adapter *adapter =
1770                 container_of(tx_eq, struct be_adapter, tx_eq);
1771         struct be_queue_info *txq = &adapter->tx_obj.q;
1772         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1773         struct be_eth_tx_compl *txcp;
1774         int tx_compl = 0, mcc_compl, status = 0;
1775         u16 end_idx;
1776
1777         while ((txcp = be_tx_compl_get(tx_cq))) {
1778                 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1779                                 wrb_index, txcp);
1780                 be_tx_compl_process(adapter, end_idx);
1781                 tx_compl++;
1782         }
1783
1784         mcc_compl = be_process_mcc(adapter, &status);
1785
1786         napi_complete(napi);
1787
1788         if (mcc_compl) {
1789                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1790                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1791         }
1792
1793         if (tx_compl) {
1794                 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
1795
1796                 /* As Tx wrbs have been freed up, wake up netdev queue if
1797                  * it was stopped due to lack of tx wrbs.
1798                  */
1799                 if (netif_queue_stopped(adapter->netdev) &&
1800                         atomic_read(&txq->used) < txq->len / 2) {
1801                         netif_wake_queue(adapter->netdev);
1802                 }
1803
1804                 tx_stats(adapter)->be_tx_events++;
1805                 tx_stats(adapter)->be_tx_compl += tx_compl;
1806         }
1807
1808         return 1;
1809 }
1810
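/*
 * Detect an Unrecoverable Error (UE): read the UE status CSRs from PCI
 * config space, drop the bits that are masked off, and if anything
 * remains log each set bit by name via the ue_status_low_desc/
 * ue_status_hi_desc tables (bit i of the low/high word indexes entry i).
 */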
1811 void be_detect_dump_ue(struct be_adapter *adapter)
1812 {
1813         u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1814         u32 i;
1815
1816         pci_read_config_dword(adapter->pdev,
1817                                 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1818         pci_read_config_dword(adapter->pdev,
1819                                 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1820         pci_read_config_dword(adapter->pdev,
1821                                 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1822         pci_read_config_dword(adapter->pdev,
1823                                 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1824
1825         ue_status_lo &= ~ue_status_lo_mask;
1826         ue_status_hi &= ~ue_status_hi_mask;
1827
1828         if (ue_status_lo || ue_status_hi) {
1829                 adapter->ue_detected = true;
1830                 adapter->eeh_err = true;
1831                 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1832         }
1833
1834         if (ue_status_lo) {
1835                 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1836                         if (ue_status_lo & 1)
1837                                 dev_err(&adapter->pdev->dev,
1838                                 "UE: %s bit set\n", ue_status_low_desc[i]);
1839                 }
1840         }
1841         if (ue_status_hi) {
1842                 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1843                         if (ue_status_hi & 1)
1844                                 dev_err(&adapter->pdev->dev,
1845                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
1846                 }
1847         }
1849 }
1850
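/*
 * Periodic worker, self-rescheduled every 1000ms. While the interface is
 * down it only reaps MCC completions (interrupts may not be armed yet)
 * and checks for UEs; when up it additionally refreshes stats, updates
 * TX/RX rates and EQ delays, and replenishes any RX queue that starved
 * while posting frags.
 */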
1851 static void be_worker(struct work_struct *work)
1852 {
1853         struct be_adapter *adapter =
1854                 container_of(work, struct be_adapter, work.work);
1855         struct be_rx_obj *rxo;
1856         int i;
1857
1858         /* When interrupts are not yet enabled, just reap any pending
1859          * MCC completions */
1860         if (!netif_running(adapter->netdev)) {
1861                 int mcc_compl, status = 0;
1862
1863                 mcc_compl = be_process_mcc(adapter, &status);
1864
1865                 if (mcc_compl) {
1866                         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1867                         be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1868                 }
1869
1870                 if (!adapter->ue_detected && !lancer_chip(adapter))
1871                         be_detect_dump_ue(adapter);
1872
1873                 goto reschedule;
1874         }
1875
1876         if (!adapter->stats_cmd_sent)
1877                 be_cmd_get_stats(adapter, &adapter->stats_cmd);
1878
1879         be_tx_rate_update(adapter);
1880
1881         for_all_rx_queues(adapter, rxo, i) {
1882                 be_rx_rate_update(rxo);
1883                 be_rx_eqd_update(adapter, rxo);
1884
1885                 if (rxo->rx_post_starved) {
1886                         rxo->rx_post_starved = false;
1887                         be_post_rx_frags(rxo);
1888                 }
1889         }
1890         if (!adapter->ue_detected && !lancer_chip(adapter))
1891                 be_detect_dump_ue(adapter);
1892
1893 reschedule:
1894         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1895 }
1896
1897 static void be_msix_disable(struct be_adapter *adapter)
1898 {
1899         if (adapter->msix_enabled) {
1900                 pci_disable_msix(adapter->pdev);
1901                 adapter->msix_enabled = false;
1902         }
1903 }
1904
1905 static int be_num_rxqs_get(struct be_adapter *adapter)
1906 {
1907         if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1908                 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) { /* 0x400: FLEX10 mode */
1909                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1910         } else {
1911                 dev_warn(&adapter->pdev->dev,
1912                         "No support for multiple RX queues\n");
1913                 return 1;
1914         }
1915 }
1916
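/*
 * Request one vector per RX queue plus one shared TX/MCC vector. If
 * fewer vectors are available, pci_enable_msix() returns the count that
 * would succeed and the request is retried with that count, shrinking
 * num_rx_qs to match. E.g. with 5 RX queues, 6 vectors are requested;
 * if only 4 are granted the retry uses 4 and the driver runs 3 RX
 * queues (4 - 1 for TX/MCC).
 */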
1917 static void be_msix_enable(struct be_adapter *adapter)
1918 {
1919 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
1920         int i, status;
1921
1922         adapter->num_rx_qs = be_num_rxqs_get(adapter);
1923
1924         for (i = 0; i < (adapter->num_rx_qs + 1); i++)
1925                 adapter->msix_entries[i].entry = i;
1926
1927         status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1928                         adapter->num_rx_qs + 1);
1929         if (status == 0) {
1930                 goto done;
1931         } else if (status >= BE_MIN_MSIX_VECTORS) {
1932                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
1933                                 status) == 0) {
1934                         adapter->num_rx_qs = status - 1;
1935                         dev_warn(&adapter->pdev->dev,
1936                                 "Could allocate only %d MSI-X vectors. "
1937                                 "Using %d RX Qs\n", status, adapter->num_rx_qs);
1938                         goto done;
1939                 }
1940         }
1941         return;
1942 done:
1943         adapter->msix_enabled = true;
1944 }
1945
1946 static void be_sriov_enable(struct be_adapter *adapter)
1947 {
1948         be_check_sriov_fn_type(adapter);
1949 #ifdef CONFIG_PCI_IOV
1950         if (be_physfn(adapter) && num_vfs) {
1951                 int status;
1952
1953                 status = pci_enable_sriov(adapter->pdev, num_vfs);
1954                 adapter->sriov_enabled = !status;
1955         }
1956 #endif
1957 }
1958
1959 static void be_sriov_disable(struct be_adapter *adapter)
1960 {
1961 #ifdef CONFIG_PCI_IOV
1962         if (adapter->sriov_enabled) {
1963                 pci_disable_sriov(adapter->pdev);
1964                 adapter->sriov_enabled = false;
1965         }
1966 #endif
1967 }
1968
1969 static inline int be_msix_vec_get(struct be_adapter *adapter,
1970                                         struct be_eq_obj *eq_obj)
1971 {
1972         return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
1973 }
1974
1975 static int be_request_irq(struct be_adapter *adapter,
1976                 struct be_eq_obj *eq_obj,
1977                 void *handler, char *desc, void *context)
1978 {
1979         struct net_device *netdev = adapter->netdev;
1980         int vec;
1981
1982         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
1983         vec = be_msix_vec_get(adapter, eq_obj);
1984         return request_irq(vec, handler, 0, eq_obj->desc, context);
1985 }
1986
1987 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
1988                         void *context)
1989 {
1990         int vec = be_msix_vec_get(adapter, eq_obj);
1991         free_irq(vec, context);
1992 }
1993
1994 static int be_msix_register(struct be_adapter *adapter)
1995 {
1996         struct be_rx_obj *rxo;
1997         int status, i;
1998         char qname[10];
1999
2000         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2001                                 adapter);
2002         if (status)
2003                 goto err;
2004
2005         for_all_rx_queues(adapter, rxo, i) {
2006                 sprintf(qname, "rxq%d", i);
2007                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2008                                 qname, rxo);
2009                 if (status)
2010                         goto err_msix;
2011         }
2012
2013         return 0;
2014
2015 err_msix:
2016         be_free_irq(adapter, &adapter->tx_eq, adapter);
2017
2018         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2019                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2020
2021 err:
2022         dev_warn(&adapter->pdev->dev,
2023                 "MSIX Request IRQ failed - err %d\n", status);
2024         pci_disable_msix(adapter->pdev);
2025         adapter->msix_enabled = false;
2026         return status;
2027 }
2028
2029 static int be_irq_register(struct be_adapter *adapter)
2030 {
2031         struct net_device *netdev = adapter->netdev;
2032         int status;
2033
2034         if (adapter->msix_enabled) {
2035                 status = be_msix_register(adapter);
2036                 if (status == 0)
2037                         goto done;
2038                 /* INTx is not supported for VF */
2039                 if (!be_physfn(adapter))
2040                         return status;
2041         }
2042
2043         /* INTx */
2044         netdev->irq = adapter->pdev->irq;
2045         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2046                         adapter);
2047         if (status) {
2048                 dev_err(&adapter->pdev->dev,
2049                         "INTx request IRQ failed - err %d\n", status);
2050                 return status;
2051         }
2052 done:
2053         adapter->isr_registered = true;
2054         return 0;
2055 }
2056
2057 static void be_irq_unregister(struct be_adapter *adapter)
2058 {
2059         struct net_device *netdev = adapter->netdev;
2060         struct be_rx_obj *rxo;
2061         int i;
2062
2063         if (!adapter->isr_registered)
2064                 return;
2065
2066         /* INTx */
2067         if (!adapter->msix_enabled) {
2068                 free_irq(netdev->irq, adapter);
2069                 goto done;
2070         }
2071
2072         /* MSIx */
2073         be_free_irq(adapter, &adapter->tx_eq, adapter);
2074
2075         for_all_rx_queues(adapter, rxo, i)
2076                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2077
2078 done:
2079         adapter->isr_registered = false;
2080 }
2081
2082 static int be_close(struct net_device *netdev)
2083 {
2084         struct be_adapter *adapter = netdev_priv(netdev);
2085         struct be_rx_obj *rxo;
2086         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2087         int vec, i;
2088
2089         be_async_mcc_disable(adapter);
2090
2091         netif_stop_queue(netdev);
2092         netif_carrier_off(netdev);
2093         adapter->link_up = false;
2094
2095         if (!lancer_chip(adapter))
2096                 be_intr_set(adapter, false);
2097
2098         if (adapter->msix_enabled) {
2099                 vec = be_msix_vec_get(adapter, tx_eq);
2100                 synchronize_irq(vec);
2101
2102                 for_all_rx_queues(adapter, rxo, i) {
2103                         vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2104                         synchronize_irq(vec);
2105                 }
2106         } else {
2107                 synchronize_irq(netdev->irq);
2108         }
2109         be_irq_unregister(adapter);
2110
2111         for_all_rx_queues(adapter, rxo, i)
2112                 napi_disable(&rxo->rx_eq.napi);
2113
2114         napi_disable(&tx_eq->napi);
2115
2116         /* Wait for all pending tx completions to arrive so that
2117          * all tx skbs are freed.
2118          */
2119         be_tx_compl_clean(adapter);
2120
2121         return 0;
2122 }
2123
2124 static int be_open(struct net_device *netdev)
2125 {
2126         struct be_adapter *adapter = netdev_priv(netdev);
2127         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2128         struct be_rx_obj *rxo;
2129         bool link_up;
2130         int status, i;
2131         u8 mac_speed;
2132         u16 link_speed;
2133
2134         for_all_rx_queues(adapter, rxo, i) {
2135                 be_post_rx_frags(rxo);
2136                 napi_enable(&rxo->rx_eq.napi);
2137         }
2138         napi_enable(&tx_eq->napi);
2139
2140         be_irq_register(adapter);
2141
2142         if (!lancer_chip(adapter))
2143                 be_intr_set(adapter, true);
2144
2145         /* The evt queues are created in unarmed state; arm them */
2146         for_all_rx_queues(adapter, rxo, i) {
2147                 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2148                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2149         }
2150         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2151
2152         /* Now that interrupts are on we can process async mcc */
2153         be_async_mcc_enable(adapter);
2154
2155         status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2156                         &link_speed);
2157         if (status)
2158                 goto err;
2159         be_link_status_update(adapter, link_up);
2160
2161         if (be_physfn(adapter)) {
2162                 status = be_vid_config(adapter, false, 0);
2163                 if (status)
2164                         goto err;
2165
2166                 status = be_cmd_set_flow_control(adapter,
2167                                 adapter->tx_fc, adapter->rx_fc);
2168                 if (status)
2169                         goto err;
2170         }
2171
2172         return 0;
2173 err:
2174         be_close(adapter->netdev);
2175         return -EIO;
2176 }
2177
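/*
 * Configure Wake-on-LAN: enabling programs the magic-packet filter with
 * the netdev's MAC address and sets the PCICFG PM bits; disabling
 * reprograms the filter with an all-zero MAC. pci_enable_wake() is
 * toggled for both D3hot and D3cold to match.
 */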
2178 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2179 {
2180         struct be_dma_mem cmd;
2181         int status = 0;
2182         u8 mac[ETH_ALEN];
2183
2184         memset(mac, 0, ETH_ALEN);
2185
2186         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2187         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2188                                     GFP_KERNEL);
2189         if (cmd.va == NULL)
2190                 return -ENOMEM;
2191         memset(cmd.va, 0, cmd.size);
2192
2193         if (enable) {
2194                 status = pci_write_config_dword(adapter->pdev,
2195                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2196                 if (status) {
2197                         dev_err(&adapter->pdev->dev,
2198                                 "Could not enable Wake-on-LAN\n");
2199                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2200                                           cmd.dma);
2201                         return status;
2202                 }
2203                 status = be_cmd_enable_magic_wol(adapter,
2204                                 adapter->netdev->dev_addr, &cmd);
2205                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2206                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2207         } else {
2208                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2209                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2210                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2211         }
2212
2213         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2214         return status;
2215 }
2216
2217 /*
2218  * Generate a seed MAC address from the PF MAC address using jhash.
2219  * MAC addresses for VFs are assigned incrementally starting from the seed.
2220  * These addresses are programmed in the ASIC by the PF and the VF driver
2221  * queries for the MAC address during its probe.
2222  */
2223 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2224 {
2225         u32 vf = 0;
2226         int status = 0;
2227         u8 mac[ETH_ALEN];
2228
2229         be_vf_eth_addr_generate(adapter, mac);
2230
2231         for (vf = 0; vf < num_vfs; vf++) {
2232                 status = be_cmd_pmac_add(adapter, mac,
2233                                         adapter->vf_cfg[vf].vf_if_handle,
2234                                         &adapter->vf_cfg[vf].vf_pmac_id,
2235                                         vf + 1);
2236                 if (status)
2237                         dev_err(&adapter->pdev->dev,
2238                                 "MAC address add failed for VF %d\n", vf);
2239                 else
2240                         memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2241
2242                 mac[5] += 1;
2243         }
2244         return status;
2245 }
2246
2247 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2248 {
2249         u32 vf;
2250
2251         for (vf = 0; vf < num_vfs; vf++) {
2252                 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2253                         be_cmd_pmac_del(adapter,
2254                                         adapter->vf_cfg[vf].vf_if_handle,
2255                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2256         }
2257 }
2258
2259 static int be_setup(struct be_adapter *adapter)
2260 {
2261         struct net_device *netdev = adapter->netdev;
2262         u32 cap_flags, en_flags, vf = 0;
2263         int status;
2264         u8 mac[ETH_ALEN];
2265
2266         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
2267
2268         if (be_physfn(adapter)) {
2269                 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2270                                 BE_IF_FLAGS_PROMISCUOUS |
2271                                 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2272                 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2273
2274                 if (be_multi_rxq(adapter)) {
2275                         cap_flags |= BE_IF_FLAGS_RSS;
2276                         en_flags |= BE_IF_FLAGS_RSS;
2277                 }
2278         }
2279
2280         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2281                         netdev->dev_addr, false/* pmac_invalid */,
2282                         &adapter->if_handle, &adapter->pmac_id, 0);
2283         if (status != 0)
2284                 goto do_none;
2285
2286         if (be_physfn(adapter)) {
2287                 if (adapter->sriov_enabled) {
2288                         while (vf < num_vfs) {
2289                                 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2290                                                         BE_IF_FLAGS_BROADCAST;
2291                                 status = be_cmd_if_create(adapter, cap_flags,
2292                                         en_flags, mac, true,
2293                                         &adapter->vf_cfg[vf].vf_if_handle,
2294                                         NULL, vf + 1);
2295                                 if (status) {
2296                                         dev_err(&adapter->pdev->dev,
2297                                         "Interface Create failed for VF %d\n",
2298                                         vf);
2299                                         goto if_destroy;
2300                                 }
2301                                 adapter->vf_cfg[vf].vf_pmac_id =
2302                                                         BE_INVALID_PMAC_ID;
2303                                 vf++;
2304                         }
2305                 }
2306         } else {
2307                 status = be_cmd_mac_addr_query(adapter, mac,
2308                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2309                 if (!status) {
2310                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2311                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2312                 }
2313         }
2314
2315         status = be_tx_queues_create(adapter);
2316         if (status != 0)
2317                 goto if_destroy;
2318
2319         status = be_rx_queues_create(adapter);
2320         if (status != 0)
2321                 goto tx_qs_destroy;
2322
2323         status = be_mcc_queues_create(adapter);
2324         if (status != 0)
2325                 goto rx_qs_destroy;
2326
2327         adapter->link_speed = -1;
2328
2329         return 0;
2330
2332 rx_qs_destroy:
2333         be_rx_queues_destroy(adapter);
2334 tx_qs_destroy:
2335         be_tx_queues_destroy(adapter);
2336 if_destroy:
2337         if (be_physfn(adapter) && adapter->sriov_enabled)
2338                 for (vf = 0; vf < num_vfs; vf++)
2339                         if (adapter->vf_cfg[vf].vf_if_handle)
2340                                 be_cmd_if_destroy(adapter,
2341                                         adapter->vf_cfg[vf].vf_if_handle,
2342                                         vf + 1);
2343         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2344 do_none:
2345         return status;
2346 }
2347
2348 static int be_clear(struct be_adapter *adapter)
2349 {
2350         int vf;
2351
2352         if (be_physfn(adapter) && adapter->sriov_enabled)
2353                 be_vf_eth_addr_rem(adapter);
2354
2355         be_mcc_queues_destroy(adapter);
2356         be_rx_queues_destroy(adapter);
2357         be_tx_queues_destroy(adapter);
2358
2359         if (be_physfn(adapter) && adapter->sriov_enabled)
2360                 for (vf = 0; vf < num_vfs; vf++)
2361                         if (adapter->vf_cfg[vf].vf_if_handle)
2362                                 be_cmd_if_destroy(adapter,
2363                                         adapter->vf_cfg[vf].vf_if_handle,
2364                                         vf + 1);
2365
2366         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2367
2368         /* tell fw we're done with firing cmds */
2369         be_cmd_fw_clean(adapter);
2370         return 0;
2371 }
2372
2374 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2375 static bool be_flash_redboot(struct be_adapter *adapter,
2376                         const u8 *p, u32 img_start, int image_size,
2377                         int hdr_size)
2378 {
2379         u32 crc_offset;
2380         u8 flashed_crc[4];
2381         int status;
2382
2383         crc_offset = hdr_size + img_start + image_size - 4;
2384
2385         p += crc_offset;
2386
2387         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2388                         (image_size - 4));
2389         if (status) {
2390                 dev_err(&adapter->pdev->dev,
2391                 "could not get crc from flash, not flashing redboot\n");
2392                 return false;
2393         }
2394
2395         /* update redboot only if crc does not match */
2396         if (!memcmp(flashed_crc, p, 4))
2397                 return false;
2398         else
2399                 return true;
2400 }
2401
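/*
 * Flash each firmware component listed in the per-generation table
 * below. Every image is streamed to the card in 32KB chunks:
 * intermediate chunks use FLASHROM_OPER_SAVE and the final chunk uses
 * FLASHROM_OPER_FLASH to commit. Redboot is reflashed only when its
 * on-flash CRC differs (be_flash_redboot() above), and the NCSI image
 * is skipped when the running firmware is older than 3.102.148.0.
 */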
2402 static int be_flash_data(struct be_adapter *adapter,
2403                         const struct firmware *fw,
2404                         struct be_dma_mem *flash_cmd, int num_of_images)
2406 {
2407         int status = 0, i, filehdr_size = 0;
2408         u32 total_bytes = 0, flash_op;
2409         int num_bytes;
2410         const u8 *p = fw->data;
2411         struct be_cmd_write_flashrom *req = flash_cmd->va;
2412         const struct flash_comp *pflashcomp;
2413         int num_comp;
2414
2415         static const struct flash_comp gen3_flash_types[9] = {
2416                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2417                         FLASH_IMAGE_MAX_SIZE_g3},
2418                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2419                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2420                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2421                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2422                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2423                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2424                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2425                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2426                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2427                         FLASH_IMAGE_MAX_SIZE_g3},
2428                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2429                         FLASH_IMAGE_MAX_SIZE_g3},
2430                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2431                         FLASH_IMAGE_MAX_SIZE_g3},
2432                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2433                         FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2434         };
2435         static const struct flash_comp gen2_flash_types[8] = {
2436                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2437                         FLASH_IMAGE_MAX_SIZE_g2},
2438                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2439                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2440                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2441                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2442                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2443                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2444                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2445                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2446                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2447                         FLASH_IMAGE_MAX_SIZE_g2},
2448                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2449                         FLASH_IMAGE_MAX_SIZE_g2},
2450                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2451                          FLASH_IMAGE_MAX_SIZE_g2}
2452         };
2453
2454         if (adapter->generation == BE_GEN3) {
2455                 pflashcomp = gen3_flash_types;
2456                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2457                 num_comp = ARRAY_SIZE(gen3_flash_types);
2458         } else {
2459                 pflashcomp = gen2_flash_types;
2460                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2461                 num_comp = ARRAY_SIZE(gen2_flash_types);
2462         }
2463         for (i = 0; i < num_comp; i++) {
2464                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2465                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2466                         continue;
2467                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2468                         (!be_flash_redboot(adapter, fw->data,
2469                         pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2470                         (num_of_images * sizeof(struct image_hdr)))))
2471                         continue;
2472                 p = fw->data;
2473                 p += filehdr_size + pflashcomp[i].offset
2474                         + (num_of_images * sizeof(struct image_hdr));
2475                 if (p + pflashcomp[i].size > fw->data + fw->size)
2476                         return -1;
2477                 total_bytes = pflashcomp[i].size;
2478                 while (total_bytes) {
2479                         if (total_bytes > 32*1024)
2480                                 num_bytes = 32*1024;
2481                         else
2482                                 num_bytes = total_bytes;
2483                         total_bytes -= num_bytes;
2484
2485                         if (!total_bytes)
2486                                 flash_op = FLASHROM_OPER_FLASH;
2487                         else
2488                                 flash_op = FLASHROM_OPER_SAVE;
2489                         memcpy(req->params.data_buf, p, num_bytes);
2490                         p += num_bytes;
2491                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2492                                 pflashcomp[i].optype, flash_op, num_bytes);
2493                         if (status) {
2494                                 dev_err(&adapter->pdev->dev,
2495                                         "cmd to write to flash rom failed.\n");
2496                                 return -1;
2497                         }
2498                         yield();
2499                 }
2500         }
2501         return 0;
2502 }
2503
2504 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2505 {
2506         if (fhdr == NULL)
2507                 return 0;
2508         if (fhdr->build[0] == '3')
2509                 return BE_GEN3;
2510         else if (fhdr->build[0] == '2')
2511                 return BE_GEN2;
2512         else
2513                 return 0;
2514 }
2515
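/*
 * ethtool firmware flash entry point. The UFI file begins with a
 * flash_file_hdr whose build[0] byte encodes the controller generation
 * (see get_ufigen_type() above). Gen3 UFIs carry an array of image_hdrs
 * and only images with imageid == 1 are flashed here; gen2 UFIs hold a
 * single image set.
 */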
2516 int be_load_fw(struct be_adapter *adapter, u8 *func)
2517 {
2518         char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2519         const struct firmware *fw;
2520         struct flash_file_hdr_g2 *fhdr;
2521         struct flash_file_hdr_g3 *fhdr3;
2522         struct image_hdr *img_hdr_ptr = NULL;
2523         struct be_dma_mem flash_cmd;
2524         int status, i = 0, num_imgs = 0;
2525         const u8 *p;
2526
2527         if (!netif_running(adapter->netdev)) {
2528                 dev_err(&adapter->pdev->dev,
2529                         "Firmware load not allowed (interface is down)\n");
2530                 return -EPERM;
2531         }
2532
2533         strcpy(fw_file, func);
2534
2535         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2536         if (status)
2537                 goto fw_exit;
2538
2539         p = fw->data;
2540         fhdr = (struct flash_file_hdr_g2 *) p;
2541         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2542
2543         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2544         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2545                                           &flash_cmd.dma, GFP_KERNEL);
2546         if (!flash_cmd.va) {
2547                 status = -ENOMEM;
2548                 dev_err(&adapter->pdev->dev,
2549                         "Memory allocation failure while flashing\n");
2550                 goto fw_exit;
2551         }
2552
2553         if ((adapter->generation == BE_GEN3) &&
2554                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2555                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2556                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2557                 for (i = 0; i < num_imgs; i++) {
2558                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2559                                         (sizeof(struct flash_file_hdr_g3) +
2560                                          i * sizeof(struct image_hdr)));
2561                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2562                                 status = be_flash_data(adapter, fw, &flash_cmd,
2563                                                         num_imgs);
2564                 }
2565         } else if ((adapter->generation == BE_GEN2) &&
2566                         (get_ufigen_type(fhdr) == BE_GEN2)) {
2567                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2568         } else {
2569                 dev_err(&adapter->pdev->dev,
2570                         "UFI and Interface are not compatible for flashing\n");
2571                 status = -EINVAL;
2572         }
2573
2574         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2575                           flash_cmd.dma);
2576         if (status) {
2577                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2578                 goto fw_exit;
2579         }
2580
2581         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2582
2583 fw_exit:
2584         release_firmware(fw);
2585         return status;
2586 }
2587
2588 static const struct net_device_ops be_netdev_ops = {
2589         .ndo_open               = be_open,
2590         .ndo_stop               = be_close,
2591         .ndo_start_xmit         = be_xmit,
2592         .ndo_set_rx_mode        = be_set_multicast_list,
2593         .ndo_set_mac_address    = be_mac_addr_set,
2594         .ndo_change_mtu         = be_change_mtu,
2595         .ndo_validate_addr      = eth_validate_addr,
2596         .ndo_vlan_rx_register   = be_vlan_register,
2597         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
2598         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
2599         .ndo_set_vf_mac         = be_set_vf_mac,
2600         .ndo_set_vf_vlan        = be_set_vf_vlan,
2601         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
2602         .ndo_get_vf_config      = be_get_vf_config
2603 };
2604
2605 static void be_netdev_init(struct net_device *netdev)
2606 {
2607         struct be_adapter *adapter = netdev_priv(netdev);
2608         struct be_rx_obj *rxo;
2609         int i;
2610
2611         netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
2612                 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
2613                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2614                 NETIF_F_GRO | NETIF_F_TSO6;
2615
2616         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
2617                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2618
2619         if (lancer_chip(adapter))
2620                 netdev->vlan_features |= NETIF_F_TSO6;
2621
2622         netdev->flags |= IFF_MULTICAST;
2623
2624         adapter->rx_csum = true;
2625
2626         /* Default settings for Rx and Tx flow control */
2627         adapter->rx_fc = true;
2628         adapter->tx_fc = true;
2629
2630         netif_set_gso_max_size(netdev, 65535);
2631
2632         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2633
2634         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2635
2636         for_all_rx_queues(adapter, rxo, i)
2637                 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2638                                 BE_NAPI_WEIGHT);
2639
2640         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2641                 BE_NAPI_WEIGHT);
2642 }
2643
2644 static void be_unmap_pci_bars(struct be_adapter *adapter)
2645 {
2646         if (adapter->csr)
2647                 iounmap(adapter->csr);
2648         if (adapter->db)
2649                 iounmap(adapter->db);
2650         if (adapter->pcicfg && be_physfn(adapter))
2651                 iounmap(adapter->pcicfg);
2652 }
2653
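/*
 * Map the PCI BARs, which differ by chip family: Lancer exposes only
 * the doorbell area (BAR 0). On BEx the PF maps CSR (BAR 2), doorbell
 * (BAR 4) and pcicfg (BAR 1 on gen2, BAR 0 on gen3); a gen3 VF maps its
 * doorbell at BAR 0 and reaches pcicfg at a fixed offset
 * (SRIOV_VF_PCICFG_OFFSET) inside that mapping.
 */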
2654 static int be_map_pci_bars(struct be_adapter *adapter)
2655 {
2656         u8 __iomem *addr;
2657         int pcicfg_reg, db_reg;
2658
2659         if (lancer_chip(adapter)) {
2660                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2661                         pci_resource_len(adapter->pdev, 0));
2662                 if (addr == NULL)
2663                         return -ENOMEM;
2664                 adapter->db = addr;
2665                 return 0;
2666         }
2667
2668         if (be_physfn(adapter)) {
2669                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2670                                 pci_resource_len(adapter->pdev, 2));
2671                 if (addr == NULL)
2672                         return -ENOMEM;
2673                 adapter->csr = addr;
2674         }
2675
2676         if (adapter->generation == BE_GEN2) {
2677                 pcicfg_reg = 1;
2678                 db_reg = 4;
2679         } else {
2680                 pcicfg_reg = 0;
2681                 if (be_physfn(adapter))
2682                         db_reg = 4;
2683                 else
2684                         db_reg = 0;
2685         }
2686         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2687                                 pci_resource_len(adapter->pdev, db_reg));
2688         if (addr == NULL)
2689                 goto pci_map_err;
2690         adapter->db = addr;
2691
2692         if (be_physfn(adapter)) {
2693                 addr = ioremap_nocache(
2694                                 pci_resource_start(adapter->pdev, pcicfg_reg),
2695                                 pci_resource_len(adapter->pdev, pcicfg_reg));
2696                 if (addr == NULL)
2697                         goto pci_map_err;
2698                 adapter->pcicfg = addr;
2699         } else
2700                 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
2701
2702         return 0;
2703 pci_map_err:
2704         be_unmap_pci_bars(adapter);
2705         return -ENOMEM;
2706 }
2707
2709 static void be_ctrl_cleanup(struct be_adapter *adapter)
2710 {
2711         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
2712
2713         be_unmap_pci_bars(adapter);
2714
2715         if (mem->va)
2716                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2717                                   mem->dma);
2718
2719         mem = &adapter->mc_cmd_mem;
2720         if (mem->va)
2721                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2722                                   mem->dma);
2723 }
2724
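/*
 * Controller init: map the BARs, then carve a 16-byte-aligned mailbox
 * out of an allocation padded by 16 bytes (PTR_ALIGN on both the
 * virtual and DMA addresses), allocate the multicast command buffer,
 * and set up the mbox/MCC locks.
 */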
2725 static int be_ctrl_init(struct be_adapter *adapter)
2726 {
2727         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2728         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
2729         struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
2730         int status;
2731
2732         status = be_map_pci_bars(adapter);
2733         if (status)
2734                 goto done;
2735
2736         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2737         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
2738                                                 mbox_mem_alloc->size,
2739                                                 &mbox_mem_alloc->dma,
2740                                                 GFP_KERNEL);
2741         if (!mbox_mem_alloc->va) {
2742                 status = -ENOMEM;
2743                 goto unmap_pci_bars;
2744         }
2745
2746         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2747         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2748         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2749         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
2750
2751         mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2752         mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
2753                                             mc_cmd_mem->size, &mc_cmd_mem->dma,
2754                                             GFP_KERNEL);
2755         if (mc_cmd_mem->va == NULL) {
2756                 status = -ENOMEM;
2757                 goto free_mbox;
2758         }
2759         memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2760
2761         mutex_init(&adapter->mbox_lock);
2762         spin_lock_init(&adapter->mcc_lock);
2763         spin_lock_init(&adapter->mcc_cq_lock);
2764
2765         init_completion(&adapter->flash_compl);
2766         pci_save_state(adapter->pdev);
2767         return 0;
2768
2769 free_mbox:
2770         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
2771                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
2772
2773 unmap_pci_bars:
2774         be_unmap_pci_bars(adapter);
2775
2776 done:
2777         return status;
2778 }
2779
2780 static void be_stats_cleanup(struct be_adapter *adapter)
2781 {
2782         struct be_dma_mem *cmd = &adapter->stats_cmd;
2783
2784         if (cmd->va)
2785                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
2786                                   cmd->va, cmd->dma);
2787 }
2788
2789 static int be_stats_init(struct be_adapter *adapter)
2790 {
2791         struct be_dma_mem *cmd = &adapter->stats_cmd;
2792
2793         cmd->size = sizeof(struct be_cmd_req_get_stats);
2794         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
2795                                      GFP_KERNEL);
2796         if (cmd->va == NULL)
2797                 return -ENOMEM;
2798         memset(cmd->va, 0, cmd->size);
2799         return 0;
2800 }
2801
2802 static void __devexit be_remove(struct pci_dev *pdev)
2803 {
2804         struct be_adapter *adapter = pci_get_drvdata(pdev);
2805
2806         if (!adapter)
2807                 return;
2808
2809         cancel_delayed_work_sync(&adapter->work);
2810
2811         unregister_netdev(adapter->netdev);
2812
2813         be_clear(adapter);
2814
2815         be_stats_cleanup(adapter);
2816
2817         be_ctrl_cleanup(adapter);
2818
2819         be_sriov_disable(adapter);
2820
2821         be_msix_disable(adapter);
2822
2823         pci_set_drvdata(pdev, NULL);
2824         pci_release_regions(pdev);
2825         pci_disable_device(pdev);
2826
2827         free_netdev(adapter->netdev);
2828 }
2829
2830 static int be_get_config(struct be_adapter *adapter)
2831 {
2832         int status;
2833         u8 mac[ETH_ALEN];
2834
2835         status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
2836         if (status)
2837                 return status;
2838
2839         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2840                         &adapter->function_mode, &adapter->function_caps);
2841         if (status)
2842                 return status;
2843
2844         memset(mac, 0, ETH_ALEN);
2845
2846         if (be_physfn(adapter)) {
2847                 status = be_cmd_mac_addr_query(adapter, mac,
2848                         MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
2849
2850                 if (status)
2851                         return status;
2852
2853                 if (!is_valid_ether_addr(mac))
2854                         return -EADDRNOTAVAIL;
2855
2856                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2857                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2858         }
2859
2860         if (adapter->function_mode & 0x400)     /* FLEX10 mode */
2861                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
2862         else
2863                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2864
2865         return 0;
2866 }
2867
2868 static int be_dev_family_check(struct be_adapter *adapter)
2869 {
2870         struct pci_dev *pdev = adapter->pdev;
2871         u32 sli_intf = 0, if_type;
2872
2873         switch (pdev->device) {
2874         case BE_DEVICE_ID1:
2875         case OC_DEVICE_ID1:
2876                 adapter->generation = BE_GEN2;
2877                 break;
2878         case BE_DEVICE_ID2:
2879         case OC_DEVICE_ID2:
2880                 adapter->generation = BE_GEN3;
2881                 break;
2882         case OC_DEVICE_ID3:
2883                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
2884                 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
2885                                                 SLI_INTF_IF_TYPE_SHIFT;
2886
2887                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
2888                         if_type != 0x02) {
2889                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
2890                         return -EINVAL;
2891                 }
2892                 if (num_vfs > 0) {
2893                         dev_err(&pdev->dev, "VFs not supported\n");
2894                         return -EINVAL;
2895                 }
2896                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
2897                                          SLI_INTF_FAMILY_SHIFT);
2898                 adapter->generation = BE_GEN3;
2899                 break;
2900         default:
2901                 adapter->generation = 0;
2902         }
2903         return 0;
2904 }
2905
2906 static int __devinit be_probe(struct pci_dev *pdev,
2907                         const struct pci_device_id *pdev_id)
2908 {
2909         int status = 0;
2910         struct be_adapter *adapter;
2911         struct net_device *netdev;
2912
2913         status = pci_enable_device(pdev);
2914         if (status)
2915                 goto do_none;
2916
2917         status = pci_request_regions(pdev, DRV_NAME);
2918         if (status)
2919                 goto disable_dev;
2920         pci_set_master(pdev);
2921
2922         netdev = alloc_etherdev(sizeof(struct be_adapter));
2923         if (netdev == NULL) {
2924                 status = -ENOMEM;
2925                 goto rel_reg;
2926         }
2927         adapter = netdev_priv(netdev);
2928         adapter->pdev = pdev;
2929         pci_set_drvdata(pdev, adapter);
2930
2931         status = be_dev_family_check(adapter);
2932         if (status)
2933                 goto free_netdev;
2934
2935         adapter->netdev = netdev;
2936         SET_NETDEV_DEV(netdev, &pdev->dev);
2937
        status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (!status) {
                netdev->features |= NETIF_F_HIGHDMA;
        } else {
                status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (status) {
                        dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
                        goto free_netdev;
                }
        }

        be_sriov_enable(adapter);

        status = be_ctrl_init(adapter);
        if (status)
                goto free_netdev;

        /* sync up with fw's ready state */
        if (be_physfn(adapter)) {
                status = be_cmd_POST(adapter);
                if (status)
                        goto ctrl_clean;
        }

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_cmd_reset_function(adapter);
        if (status)
                goto ctrl_clean;

        status = be_stats_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_get_config(adapter);
        if (status)
                goto stats_clean;

        be_msix_enable(adapter);

        INIT_DELAYED_WORK(&adapter->work, be_worker);

        status = be_setup(adapter);
        if (status)
                goto msix_disable;

        be_netdev_init(netdev);
        status = register_netdev(netdev);
        if (status)
                goto unsetup;
        netif_carrier_off(netdev);

        if (be_physfn(adapter) && adapter->sriov_enabled) {
                status = be_vf_eth_addr_config(adapter);
                if (status)
                        goto unreg_netdev;
        }

        dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
        schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
        return 0;

unreg_netdev:
        unregister_netdev(netdev);
unsetup:
        be_clear(adapter);
msix_disable:
        be_msix_disable(adapter);
stats_clean:
        be_stats_cleanup(adapter);
ctrl_clean:
        be_ctrl_cleanup(adapter);
free_netdev:
        be_sriov_disable(adapter);
        free_netdev(netdev);
        pci_set_drvdata(pdev, NULL);
rel_reg:
        pci_release_regions(pdev);
disable_dev:
        pci_disable_device(pdev);
do_none:
        dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
        return status;
}

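/* PM suspend: arm wake-on-LAN if configured, quiesce the interface and
 * drop the device into the requested low-power state.
 */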
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        cancel_delayed_work_sync(&adapter->work);
        if (adapter->wol)
                be_setup_wol(adapter, true);

        netif_device_detach(netdev);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
        be_clear(adapter);

        be_msix_disable(adapter);
        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}

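/* PM resume: restore PCI state, re-initialize firmware and queues, then
 * reattach the interface. Mirrors be_suspend() in reverse.
 */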
static int be_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        netif_device_detach(netdev);

        status = pci_enable_device(pdev);
        if (status)
                return status;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        be_msix_enable(adapter);
        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                return status;

        status = be_setup(adapter);
        if (status)
                return status;

        if (netif_running(netdev)) {
                rtnl_lock();
                be_open(netdev);
                rtnl_unlock();
        }
        netif_device_attach(netdev);

        if (adapter->wol)
                be_setup_wol(adapter, false);

        schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
        return 0;
}

/*
 * A function reset will stop the controller from DMAing any further data,
 * which is why one is issued here before the device is powered off.
 */
static void be_shutdown(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        if (netif_running(netdev))
                cancel_delayed_work_sync(&adapter->work);

        netif_device_detach(netdev);

        be_cmd_reset_function(adapter);

        if (adapter->wol)
                be_setup_wol(adapter, true);

        pci_disable_device(pdev);
}

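/* EEH error_detected callback: flag the error, detach and quiesce the
 * device, and tell the PCI core whether a slot reset may recover it.
 */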
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
                                pci_channel_state_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_err(&adapter->pdev->dev, "EEH error detected\n");

        adapter->eeh_err = true;

        netif_device_detach(netdev);

        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_clear(adapter);

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_disable_device(pdev);

        return PCI_ERS_RESULT_NEED_RESET;
}

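/* EEH slot_reset callback: re-enable the freshly reset function and poll
 * firmware (POST) to confirm the card came back healthy.
 */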
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        int status;

        dev_info(&adapter->pdev->dev, "EEH reset\n");
        adapter->eeh_err = false;

        status = pci_enable_device(pdev);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_set_master(pdev);
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        /* Check if card is ok and fw is ready */
        status = be_cmd_POST(adapter);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        return PCI_ERS_RESULT_RECOVERED;
}

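/* EEH resume callback: rebuild the queues and reopen the interface once
 * the PCI core reports the slot is usable again.
 */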
static void be_eeh_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_info(&adapter->pdev->dev, "EEH resume\n");

        pci_save_state(pdev);

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto err;

        status = be_setup(adapter);
        if (status)
                goto err;

        if (netif_running(netdev)) {
                status = be_open(netdev);
                if (status)
                        goto err;
        }
        netif_device_attach(netdev);
        return;
err:
        dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

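/* Error-recovery callbacks registered with the PCI core. */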
static struct pci_error_handlers be_eeh_handlers = {
        .error_detected = be_eeh_err_detected,
        .slot_reset = be_eeh_reset,
        .resume = be_eeh_resume,
};

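/* Driver glue binding the entry points above to the supported device IDs. */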
static struct pci_driver be_driver = {
        .name = DRV_NAME,
        .id_table = be_dev_ids,
        .probe = be_probe,
        .remove = be_remove,
        .suspend = be_suspend,
        .resume = be_resume,
        .shutdown = be_shutdown,
        .err_handler = &be_eeh_handlers
};

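/* Validate module parameters at load time; out-of-range values are
 * clamped to safe defaults rather than failing the module load.
 */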
static int __init be_init_module(void)
{
        if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
            rx_frag_size != 2048) {
                printk(KERN_WARNING DRV_NAME
                        " : Module param rx_frag_size must be 2048/4096/8192."
                        " Using 2048\n");
                rx_frag_size = 2048;
        }

        if (num_vfs > 32) {
                printk(KERN_WARNING DRV_NAME
                        " : Module param num_vfs must not be greater than 32."
                        " Using 32\n");
                num_vfs = 32;
        }

        return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
        pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);