/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

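/*
 * Usage sketch (assuming the module is built as be2net): the read-only
 * parameters above are set at load time, e.g.
 *
 *      modprobe be2net rx_frag_size=4096 num_vfs=2
 *
 * and show up under /sys/module/be2net/parameters/; only multi_rxq
 * (S_IWUSR) can be changed after load.
 */
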
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC",
        "RDMA",
        "RXF",
        "RXIPS",
        "RXULP0",
        "RXULP1",
        "RXULP2",
        "TIM",
        "TPOST",
        "TPRE",
        "TXIPS",
        "TXULP0",
        "TXULP1",
        "UC",
        "WDMA",
        "TXULP2",
        "HOST1",
        "P0_OB_LINK",
        "P1_OB_LINK",
        "HOST_GPIO",
        "MBOX",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};

/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;

        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        memset(mem->va, 0, mem->size);
        return 0;
}
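
/*
 * Minimal usage sketch: callers pair be_queue_alloc() with be_queue_free(),
 * e.g. (queue length and entry type are illustrative stand-ins):
 *
 *      if (be_queue_alloc(adapter, q, 1024, sizeof(struct be_eq_entry)))
 *              return -ENOMEM;
 *      ...
 *      be_queue_free(adapter, q);
 */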

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
        u32 reg, enabled;

        /* Don't touch the registers once an EEH error has been detected */
        if (adapter->eeh_err)
                return;

        reg = ioread32(addr);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        iowrite32(reg, addr);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;

        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;

        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;

        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;

        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* MAC addr configuration will be done in hardware for VFs
         * by their corresponding PFs. Just copy to netdev addr here
         */
        if (!be_physfn(adapter))
                goto netdev_addr;

        status = be_cmd_pmac_del(adapter, adapter->if_handle,
                                adapter->pmac_id, 0);
        if (status)
                return status;

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        return status;
}

static void populate_be2_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
        struct be_port_rxf_stats_v0 *port_stats =
                be_port_rxf_stats_from_cmd(adapter);
        struct be_rxf_stats_v0 *rxf_stats =
                be_rxf_stats_from_cmd(adapter);

        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        drvs->eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
        struct be_rxf_stats_v1 *rxf_stats =
                be_rxf_stats_from_cmd(adapter);
        struct be_port_rxf_stats_v1 *port_stats =
                be_port_rxf_stats_from_cmd(adapter);

        drvs->rx_priority_pause_frames = 0;
        drvs->pmem_fifo_overflow_drop = 0;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        drvs->eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_cmd_pport_stats *pport_stats =
                pport_stats_from_cmd(adapter);

        drvs->rx_priority_pause_frames = 0;
        drvs->pmem_fifo_overflow_drop = 0;
        drvs->rx_pause_frames =
                make_64bit_val(pport_stats->rx_pause_frames_hi,
                               pport_stats->rx_pause_frames_lo);
        drvs->rx_crc_errors = make_64bit_val(pport_stats->rx_crc_errors_hi,
                                             pport_stats->rx_crc_errors_lo);
        drvs->rx_control_frames =
                make_64bit_val(pport_stats->rx_control_frames_hi,
                               pport_stats->rx_control_frames_lo);
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long =
                make_64bit_val(pport_stats->rx_frames_too_long_hi,
                               pport_stats->rx_frames_too_long_lo);
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                make_64bit_val(pport_stats->rx_symbol_errors_hi,
                               pport_stats->rx_symbol_errors_lo);
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = make_64bit_val(pport_stats->tx_pause_frames_hi,
                                              pport_stats->tx_pause_frames_lo);
        drvs->tx_controlframes =
                make_64bit_val(pport_stats->tx_control_frames_hi,
                               pport_stats->tx_control_frames_lo);
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->rx_drops_no_pbuf = 0;
        drvs->rx_drops_no_txpb = 0;
        drvs->rx_drops_no_erx_descr = 0;
        drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
        drvs->forwarded_packets = make_64bit_val(pport_stats->num_forwards_hi,
                                                 pport_stats->num_forwards_lo);
        drvs->rx_drops_mtu = make_64bit_val(pport_stats->rx_drops_mtu_hi,
                                            pport_stats->rx_drops_mtu_lo);
        drvs->rx_drops_no_tpre_descr = 0;
        drvs->rx_drops_too_many_frags =
                make_64bit_val(pport_stats->rx_drops_too_many_frags_hi,
                               pport_stats->rx_drops_too_many_frags_lo);
}
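
/*
 * make_64bit_val() above is assumed to splice the firmware's split 32-bit
 * counters together in the obvious way, roughly:
 *
 *      static inline u64 make_64bit_val(u32 hi, u32 lo)
 *      {
 *              return ((u64)hi << 32) | lo;
 *      }
 *
 * (a sketch; the real helper is defined in the driver headers).
 */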

void be_parse_stats(struct be_adapter *adapter)
{
        if (adapter->generation == BE_GEN3) {
                if (lancer_chip(adapter))
                        populate_lancer_stats(adapter);
                else
                        populate_be3_stats(adapter);
        } else {
                populate_be2_stats(adapter);
        }
}

void netdev_stats_update(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct net_device_stats *dev_stats = &adapter->netdev->stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        int i;

        memset(dev_stats, 0, sizeof(*dev_stats));
        for_all_rx_queues(adapter, rxo, i) {
                dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
                dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
                dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                /* no space in linux buffers: best possible approximation */
                if (adapter->generation == BE_GEN3) {
                        if (!(lancer_chip(adapter))) {
                                struct be_erx_stats_v1 *erx_stats =
                                        be_erx_stats_from_cmd(adapter);
                                dev_stats->rx_dropped +=
                                erx_stats->rx_drops_no_fragments[rxo->q.id];
                        }
                } else {
                        struct be_erx_stats_v0 *erx_stats =
                                        be_erx_stats_from_cmd(adapter);
                        dev_stats->rx_dropped +=
                                erx_stats->rx_drops_no_fragments[rxo->q.id];
                }
        }

        for_all_tx_queues(adapter, txo, i) {
                dev_stats->tx_packets += tx_stats(txo)->be_tx_pkts;
                dev_stats->tx_bytes += tx_stats(txo)->be_tx_bytes;
        }

        /* bad pkts received */
        dev_stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt +
                drvs->rx_tcp_checksum_errs +
                drvs->rx_ip_checksum_errs +
                drvs->rx_udp_checksum_errs;

        /* detailed rx errors */
        dev_stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        dev_stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
        struct net_device *netdev = adapter->netdev;

        /* If link came up or went down */
        if (adapter->link_up != link_up) {
                adapter->link_speed = -1;
                if (link_up) {
                        netif_carrier_on(netdev);
                        printk(KERN_INFO "%s: Link up\n", netdev->name);
                } else {
                        netif_carrier_off(netdev);
                        printk(KERN_INFO "%s: Link down\n", netdev->name);
                }
                adapter->link_up = link_up;
        }
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_eq_obj *rx_eq = &rxo->rx_eq;
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;
        u32 eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_fps_jiffies)) {
                stats->rx_fps_jiffies = now;
                return;
        }

        /* Update once a second */
        if ((now - stats->rx_fps_jiffies) < HZ)
                return;

        stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
                        ((now - stats->rx_fps_jiffies) / HZ);

        stats->rx_fps_jiffies = now;
        stats->prev_rx_frags = stats->rx_frags;
        eqd = stats->rx_fps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd)
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

        rx_eq->cur_eqd = eqd;
}
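
/*
 * Worked example of the AIC math above: at 220,000 rx frags/sec,
 * eqd = (220000 / 110000) << 3 = 16. The value is clamped to
 * [min_eqd, max_eqd], anything still below 10 is forced to 0 (no
 * interrupt delay), and the EQ is reprogrammed only when the value
 * actually changes.
 */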

static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
        u64 rate = bytes;

        do_div(rate, ticks / HZ);
        rate <<= 3;                     /* bytes/sec -> bits/sec */
        do_div(rate, 1000000ul);        /* bits/sec -> Mbits/sec */

        return rate;
}
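
/*
 * Worked example: 250,000,000 bytes over 2*HZ ticks ->
 * 250e6 / 2 = 125e6 bytes/sec, << 3 = 1e9 bits/sec, / 1e6 = 1000;
 * i.e. the value returned is in Mbits/sec.
 */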

static void be_tx_rate_update(struct be_tx_obj *txo)
{
        struct be_tx_stats *stats = tx_stats(txo);
        ulong now = jiffies;

        /* Wrapped around? */
        if (time_before(now, stats->be_tx_jiffies)) {
                stats->be_tx_jiffies = now;
                return;
        }

        /* Update tx rate once in two seconds */
        if ((now - stats->be_tx_jiffies) > 2 * HZ) {
                stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
                                                  - stats->be_tx_bytes_prev,
                                                 now - stats->be_tx_jiffies);
                stats->be_tx_jiffies = now;
                stats->be_tx_bytes_prev = stats->be_tx_bytes;
        }
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        stats->be_tx_reqs++;
        stats->be_tx_wrbs += wrb_cnt;
        stats->be_tx_bytes += copied;
        stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}
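
/*
 * Worked example: an skb with a linear head and two page frags needs
 * 1 (head) + 2 (frags) + 1 (hdr wrb) = 4 WRBs -- already even, so no
 * dummy. With a single frag the count would be 3, so on non-Lancer
 * chips a dummy WRB is added to keep the ring entries even.
 */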

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u8 vlan_prio = 0;
        u16 vlan_tag = 0;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
                if (lancer_chip(adapter) && adapter->sli_family ==
                                                        LANCER_A0_SLI_FAMILY) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        if (is_tcp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                tcpcs, hdr, 1);
                        else if (is_udp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                udpcs, hdr, 1);
                }
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = vlan_tx_tag_get(skb);
                vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                /* If vlan priority provided by OS is NOT in available bmap */
                if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                        vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                        adapter->recommended_prio;
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);

                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];

                busaddr = dma_map_page(dev, frag->page, frag->page_offset,
                                       frag->size, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, frag->size);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += frag->size;
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied,
                                skb_shinfo(skb)->gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
        u32 if_handle;

        if (vf) {
                if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
                vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
                status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
        }

        if (adapter->vlans_added <= adapter->max_vlans) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_N_VID; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        NULL, 0, 1, 1);
        }

        return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added++;
        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added--;
        vlan_group_set_device(adapter->vlan_grp, vid, NULL);

        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_promiscuous_config(adapter, true);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_promiscuous_config(adapter, false);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
                                &adapter->mc_cmd_mem);
                goto done;
        }

        be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
                &adapter->mc_cmd_mem);
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
                return -EINVAL;

        if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                status = be_cmd_pmac_del(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
                                        adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        status = be_cmd_pmac_add(adapter, mac,
                                adapter->vf_cfg[vf].vf_if_handle,
                                &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                                mac, vf);
        else
                memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (vf >= num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
        vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (vlan > 4095))
                return -EINVAL;

        if (vlan) {
                adapter->vf_cfg[vf].vf_vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
                adapter->vf_cfg[vf].vf_vlan_tag = 0;
                adapter->vlans_added--;
        }

        status = be_vid_config(adapter, true, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (rate < 0))
                return -EINVAL;

        if (rate > 10000)
                rate = 10000;

        adapter->vf_cfg[vf].vf_tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update the rate once in two seconds */
        if ((now - stats->rx_jiffies) < 2 * HZ)
                return;

        stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
                                now - stats->rx_jiffies);
        stats->rx_jiffies = now;
        stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = &rxo->stats;

        stats->rx_compl++;
        stats->rx_frags += rxcp->num_rcvd;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rxcp_err++;
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
                        struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min(BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_shinfo(skb)->frags[0].page = page_info->page;
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
                skb->data_len = curr_frag_len - hdr_len;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_shinfo(skb)->frags[j].size += curr_frag_len;
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}
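
/*
 * Worked example (assuming BE_HDR_LEN is 64): for a 5000-byte frame with
 * rx_frag_size = 2048, num_rcvd = 3. The first 64 bytes are copied into
 * the skb header, the remaining 1984 bytes of frag 0 become skb frag[0],
 * and frags 1-2 are appended -- merged into a single skb frag slot when
 * they come from the same physical page.
 */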

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        struct be_rx_compl_info *rxcp)
{
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                if (net_ratelimit())
                        dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, netdev);
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (unlikely(rxcp->vlanf)) {
                if (!adapter->vlan_grp || adapter->vlans_added == 0) {
                        kfree_skb(skb);
                        return;
                }
                vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
                                        rxcp->vlan_tag);
        } else {
                netif_receive_skb(skb);
        }
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct be_eq_obj *eq_obj = &rxo->rx_eq;
        u16 remaining, curr_frag_len;
        u16 i, j;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        remaining = rxcp->pkt_size;
        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                } else {
                        put_page(page_info->page);
                }
                skb_shinfo(skb)->frags[j].size += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = rxcp->pkt_size;
        skb->data_len = rxcp->pkt_size;
        skb->truesize += rxcp->pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (likely(!rxcp->vlanf))
                napi_gro_frags(&eq_obj->napi);
        else
                vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp,
                                rxcp->vlan_tag);
}

static void be_parse_rx_compl_v1(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
                                               vlan_tag, compl);
        }
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
                                               vlan_tag, compl);
        }
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
        struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
        struct be_rx_compl_info *rxcp = &rxo->rxcp;
        struct be_adapter *adapter = rxo->adapter;

        /* For checking the valid bit it is Ok to use either definition as the
         * valid bit is at the same position in both v0 and v1 Rx compl */
        if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(compl, sizeof(*compl));

        if (adapter->be3_native)
                be_parse_rx_compl_v1(adapter, compl, rxcp);
        else
                be_parse_rx_compl_v0(adapter, compl, rxcp);

        if (rxcp->vlanf) {
                /* vlanf could be wrongly set in some cards.
                 * ignore if vtm is not set */
                if ((adapter->function_mode & 0x400) && !rxcp->vtm)
                        rxcp->vlanf = 0;

                if (!lancer_chip(adapter))
                        rxcp->vlan_tag = swab16(rxcp->vlan_tag);

                if (((adapter->pvid & VLAN_VID_MASK) ==
                     (rxcp->vlan_tag & VLAN_VID_MASK)) &&
                    !adapter->vlan_tag[rxcp->vlan_tag])
                        rxcp->vlanf = 0;
        }

        /* As the compl has been parsed, reset it; we won't touch it again */
        compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

        queue_tail_inc(&rxo->cq);
        return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
        u32 order = get_order(size);

        if (order > 0)
                gfp |= __GFP_COMP;
        return alloc_pages(gfp, order);
}
1385
1386 /*
1387  * Allocate a page, split it to fragments of size rx_frag_size and post as
1388  * receive buffers to BE
1389  */
1390 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1391 {
1392         struct be_adapter *adapter = rxo->adapter;
1393         struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1394         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1395         struct be_queue_info *rxq = &rxo->q;
1396         struct page *pagep = NULL;
1397         struct be_eth_rx_d *rxd;
1398         u64 page_dmaaddr = 0, frag_dmaaddr;
1399         u32 posted, page_offset = 0;
1400
1401         page_info = &rxo->page_info_tbl[rxq->head];
1402         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1403                 if (!pagep) {
1404                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1405                         if (unlikely(!pagep)) {
1406                                 rxo->stats.rx_post_fail++;
1407                                 break;
1408                         }
1409                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1410                                                     0, adapter->big_page_size,
1411                                                     DMA_FROM_DEVICE);
                             /* skip posting if the page cannot be DMA-mapped */
                             if (dma_mapping_error(&adapter->pdev->dev,
                                                   page_dmaaddr)) {
                                     put_page(pagep);
                                     pagep = NULL;
                                     rxo->stats.rx_post_fail++;
                                     break;
                             }
1412                         page_info->page_offset = 0;
1413                 } else {
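                             /* Subsequent frags share the big page; take an
                              * extra reference for each additional user */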
1414                         get_page(pagep);
1415                         page_info->page_offset = page_offset + rx_frag_size;
1416                 }
1417                 page_offset = page_info->page_offset;
1418                 page_info->page = pagep;
1419                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1420                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1421
1422                 rxd = queue_head_node(rxq);
1423                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1424                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1425
1426                 /* Any space left in the current big page for another frag? */
1427                 if ((page_offset + rx_frag_size + rx_frag_size) >
1428                                         adapter->big_page_size) {
1429                         pagep = NULL;
1430                         page_info->last_page_user = true;
1431                 }
1432
1433                 prev_page_info = page_info;
1434                 queue_head_inc(rxq);
1435                 page_info = &page_info_tbl[rxq->head];
1436         }
1437         if (pagep)
1438                 prev_page_info->last_page_user = true;
1439
1440         if (posted) {
1441                 atomic_add(posted, &rxq->used);
1442                 be_rxq_notify(adapter, rxq->id, posted);
1443         } else if (atomic_read(&rxq->used) == 0) {
1444                 /* Let be_worker replenish when memory is available */
1445                 rxo->rx_post_starved = true;
1446         }
1447 }
1448
1449 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1450 {
1451         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1452
1453         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1454                 return NULL;
1455
1456         rmb();
1457         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1458
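             /* Reset the valid bit so a stale entry is not seen as new
              * when the ring wraps around */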
1459         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1460
1461         queue_tail_inc(tx_cq);
1462         return txcp;
1463 }
1464
1465 static u16 be_tx_compl_process(struct be_adapter *adapter,
1466                 struct be_tx_obj *txo, u16 last_index)
1467 {
1468         struct be_queue_info *txq = &txo->q;
1469         struct be_eth_wrb *wrb;
1470         struct sk_buff **sent_skbs = txo->sent_skb_list;
1471         struct sk_buff *sent_skb;
1472         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1473         bool unmap_skb_hdr = true;
1474
1475         sent_skb = sent_skbs[txq->tail];
1476         BUG_ON(!sent_skb);
1477         sent_skbs[txq->tail] = NULL;
1478
1479         /* skip header wrb */
1480         queue_tail_inc(txq);
1481
1482         do {
1483                 cur_index = txq->tail;
1484                 wrb = queue_tail_node(txq);
1485                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1486                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1487                 unmap_skb_hdr = false;
1488
1489                 num_wrbs++;
1490                 queue_tail_inc(txq);
1491         } while (cur_index != last_index);
1492
1493         kfree_skb(sent_skb);
1494         return num_wrbs;
1495 }
1496
1497 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1498 {
1499         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1500
1501         if (!eqe->evt)
1502                 return NULL;
1503
1504         rmb();
1505         eqe->evt = le32_to_cpu(eqe->evt);
1506         queue_tail_inc(&eq_obj->q);
1507         return eqe;
1508 }
1509
1510 static int event_handle(struct be_adapter *adapter,
1511                         struct be_eq_obj *eq_obj,
1512                         bool rearm)
1513 {
1514         struct be_eq_entry *eqe;
1515         u16 num = 0;
1516
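             /* Drain all pending entries; only the count matters here, the
              * event contents are not examined */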
1517         while ((eqe = event_get(eq_obj)) != NULL) {
1518                 eqe->evt = 0;
1519                 num++;
1520         }
1521
1522         /* Deal with any spurious interrupts that come
1523          * without events
1524          */
1525         if (!num)
1526                 rearm = true;
1527
1528         be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
1529         if (num)
1530                 napi_schedule(&eq_obj->napi);
1531
1532         return num;
1533 }
1534
1535 /* Just read and notify events without processing them.
1536  * Used at the time of destroying event queues */
1537 static void be_eq_clean(struct be_adapter *adapter,
1538                         struct be_eq_obj *eq_obj)
1539 {
1540         struct be_eq_entry *eqe;
1541         u16 num = 0;
1542
1543         while ((eqe = event_get(eq_obj)) != NULL) {
1544                 eqe->evt = 0;
1545                 num++;
1546         }
1547
1548         if (num)
1549                 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1550 }
1551
1552 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1553 {
1554         struct be_rx_page_info *page_info;
1555         struct be_queue_info *rxq = &rxo->q;
1556         struct be_queue_info *rx_cq = &rxo->cq;
1557         struct be_rx_compl_info *rxcp;
1558         u16 tail;
1559
1560         /* First cleanup pending rx completions */
1561         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1562                 be_rx_compl_discard(adapter, rxo, rxcp);
1563                 be_cq_notify(adapter, rx_cq->id, false, 1);
1564         }
1565
1566         /* Then free posted rx buffers that were not used */
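             /* The oldest posted buffer sits 'used' entries behind the head */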
1567         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1568         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1569                 page_info = get_rx_page_info(adapter, rxo, tail);
1570                 put_page(page_info->page);
1571                 memset(page_info, 0, sizeof(*page_info));
1572         }
1573         BUG_ON(atomic_read(&rxq->used));
1574 }
1575
1576 static void be_tx_compl_clean(struct be_adapter *adapter,
1577                                 struct be_tx_obj *txo)
1578 {
1579         struct be_queue_info *tx_cq = &txo->cq;
1580         struct be_queue_info *txq = &txo->q;
1581         struct be_eth_tx_compl *txcp;
1582         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1583         struct sk_buff **sent_skbs = txo->sent_skb_list;
1584         struct sk_buff *sent_skb;
1585         bool dummy_wrb;
1586
1587         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1588         do {
1589                 while ((txcp = be_tx_compl_get(tx_cq))) {
1590                         end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1591                                         wrb_index, txcp);
1592                         num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
1593                         cmpl++;
1594                 }
1595                 if (cmpl) {
1596                         be_cq_notify(adapter, tx_cq->id, false, cmpl);
1597                         atomic_sub(num_wrbs, &txq->used);
1598                         cmpl = 0;
1599                         num_wrbs = 0;
1600                 }
1601
1602                 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1603                         break;
1604
1605                 mdelay(1);
1606         } while (true);
1607
1608         if (atomic_read(&txq->used))
1609                 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1610                         atomic_read(&txq->used));
1611
1612         /* free posted tx for which compls will never arrive */
1613         while (atomic_read(&txq->used)) {
1614                 sent_skb = sent_skbs[txq->tail];
1615                 end_idx = txq->tail;
1616                 index_adv(&end_idx,
1617                         wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1618                         txq->len);
1619                 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1620                 atomic_sub(num_wrbs, &txq->used);
1621         }
1622 }
1623
1624 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1625 {
1626         struct be_queue_info *q;
1627
1628         q = &adapter->mcc_obj.q;
1629         if (q->created)
1630                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1631         be_queue_free(adapter, q);
1632
1633         q = &adapter->mcc_obj.cq;
1634         if (q->created)
1635                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1636         be_queue_free(adapter, q);
1637 }
1638
1639 /* Must be called only after TX qs are created as MCC shares TX EQ */
1640 static int be_mcc_queues_create(struct be_adapter *adapter)
1641 {
1642         struct be_queue_info *q, *cq;
1643
1644         /* Alloc MCC compl queue */
1645         cq = &adapter->mcc_obj.cq;
1646         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1647                         sizeof(struct be_mcc_compl)))
1648                 goto err;
1649
1650         /* Ask BE to create MCC compl queue; share TX's eq */
1651         if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1652                 goto mcc_cq_free;
1653
1654         /* Alloc MCC queue */
1655         q = &adapter->mcc_obj.q;
1656         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1657                 goto mcc_cq_destroy;
1658
1659         /* Ask BE to create MCC queue */
1660         if (be_cmd_mccq_create(adapter, q, cq))
1661                 goto mcc_q_free;
1662
1663         return 0;
1664
1665 mcc_q_free:
1666         be_queue_free(adapter, q);
1667 mcc_cq_destroy:
1668         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1669 mcc_cq_free:
1670         be_queue_free(adapter, cq);
1671 err:
1672         return -1;
1673 }
1674
1675 static void be_tx_queues_destroy(struct be_adapter *adapter)
1676 {
1677         struct be_queue_info *q;
1678         struct be_tx_obj *txo;
1679         u8 i;
1680
1681         for_all_tx_queues(adapter, txo, i) {
1682                 q = &txo->q;
1683                 if (q->created)
1684                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1685                 be_queue_free(adapter, q);
1686
1687                 q = &txo->cq;
1688                 if (q->created)
1689                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1690                 be_queue_free(adapter, q);
1691         }
1692
1693         /* Clear any residual events */
1694         be_eq_clean(adapter, &adapter->tx_eq);
1695
1696         q = &adapter->tx_eq.q;
1697         if (q->created)
1698                 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1699         be_queue_free(adapter, q);
1700 }
1701
1702 /* One TX event queue is shared by all TX compl qs */
1703 static int be_tx_queues_create(struct be_adapter *adapter)
1704 {
1705         struct be_queue_info *eq, *q, *cq;
1706         struct be_tx_obj *txo;
1707         u8 i;
1708
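             /* TX EQ uses a fixed interrupt delay; adaptive interrupt
              * coalescing (AIC) is used only on the RX path */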
1709         adapter->tx_eq.max_eqd = 0;
1710         adapter->tx_eq.min_eqd = 0;
1711         adapter->tx_eq.cur_eqd = 96;
1712         adapter->tx_eq.enable_aic = false;
1713
1714         eq = &adapter->tx_eq.q;
1715         if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1716                 sizeof(struct be_eq_entry)))
1717                 return -1;
1718
1719         if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1720                 goto err;
1721         adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1722
1723         for_all_tx_queues(adapter, txo, i) {
1724                 cq = &txo->cq;
1725                 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1726                         sizeof(struct be_eth_tx_compl)))
1727                         goto err;
1728
1729                 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1730                         goto err;
1731
1732                 q = &txo->q;
1733                 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1734                         sizeof(struct be_eth_wrb)))
1735                         goto err;
1736
1737                 if (be_cmd_txq_create(adapter, q, cq))
1738                         goto err;
1739         }
1740         return 0;
1741
1742 err:
1743         be_tx_queues_destroy(adapter);
1744         return -1;
1745 }
1746
1747 static void be_rx_queues_destroy(struct be_adapter *adapter)
1748 {
1749         struct be_queue_info *q;
1750         struct be_rx_obj *rxo;
1751         int i;
1752
1753         for_all_rx_queues(adapter, rxo, i) {
1754                 q = &rxo->q;
1755                 if (q->created) {
1756                         be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1757                         /* After the rxq is invalidated, wait for a grace time
1758                          * of 1ms for all DMA to end and the flush compl to
1759                          * arrive
1760                          */
1761                         mdelay(1);
1762                         be_rx_q_clean(adapter, rxo);
1763                 }
1764                 be_queue_free(adapter, q);
1765
1766                 q = &rxo->cq;
1767                 if (q->created)
1768                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1769                 be_queue_free(adapter, q);
1770
1771                 /* Clear any residual events */
1772                 q = &rxo->rx_eq.q;
1773                 if (q->created) {
1774                         be_eq_clean(adapter, &rxo->rx_eq);
1775                         be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1776                 }
1777                 be_queue_free(adapter, q);
1778         }
1779 }
1780
1781 static u32 be_num_rxqs_want(struct be_adapter *adapter)
1782 {
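             /* Multiple (RSS) rxqs are used only when the function supports
              * RSS and is not in SR-IOV or multi-channel (bit 0x400) mode */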
1783         if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1784                 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1785                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1786         } else {
1787                 dev_warn(&adapter->pdev->dev,
1788                         "No support for multiple RX queues\n");
1789                 return 1;
1790         }
1791 }
1792
1793 static int be_rx_queues_create(struct be_adapter *adapter)
1794 {
1795         struct be_queue_info *eq, *q, *cq;
1796         struct be_rx_obj *rxo;
1797         int rc, i;
1798
1799         adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1800                                 msix_enabled(adapter) ?
1801                                         adapter->num_msix_vec - 1 : 1);
1802         if (adapter->num_rx_qs != MAX_RX_QS)
1803                 dev_warn(&adapter->pdev->dev,
1804                         "Can create only %d RX queues\n", adapter->num_rx_qs);
1805
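             /* Unit of rx-buffer allocation; each big page is later split
              * into rx_frag_size-sized frags when posted to the rxq */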
1806         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1807         for_all_rx_queues(adapter, rxo, i) {
1808                 rxo->adapter = adapter;
1809                 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1810                 rxo->rx_eq.enable_aic = true;
1811
1812                 /* EQ */
1813                 eq = &rxo->rx_eq.q;
1814                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1815                                         sizeof(struct be_eq_entry));
1816                 if (rc)
1817                         goto err;
1818
1819                 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1820                 if (rc)
1821                         goto err;
1822
1823                 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1824
1825                 /* CQ */
1826                 cq = &rxo->cq;
1827                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1828                                 sizeof(struct be_eth_rx_compl));
1829                 if (rc)
1830                         goto err;
1831
1832                 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1833                 if (rc)
1834                         goto err;
1835                 /* Rx Q */
1836                 q = &rxo->q;
1837                 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1838                                 sizeof(struct be_eth_rx_d));
1839                 if (rc)
1840                         goto err;
1841
1842                 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1843                         BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1844                         (i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
1845                 if (rc)
1846                         goto err;
1847         }
1848
1849         if (be_multi_rxq(adapter)) {
1850                 u8 rsstable[MAX_RSS_QS];
1851
1852                 for_all_rss_queues(adapter, rxo, i)
1853                         rsstable[i] = rxo->rss_id;
1854
1855                 rc = be_cmd_rss_config(adapter, rsstable,
1856                         adapter->num_rx_qs - 1);
1857                 if (rc)
1858                         goto err;
1859         }
1860
1861         return 0;
1862 err:
1863         be_rx_queues_destroy(adapter);
1864         return -1;
1865 }
1866
1867 static bool event_peek(struct be_eq_obj *eq_obj)
1868 {
1869         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1870         return eqe->evt != 0;
1874 }
1875
1876 static irqreturn_t be_intx(int irq, void *dev)
1877 {
1878         struct be_adapter *adapter = dev;
1879         struct be_rx_obj *rxo;
1880         int isr, i, tx = 0 , rx = 0;
1881
1882         if (lancer_chip(adapter)) {
1883                 if (event_peek(&adapter->tx_eq))
1884                         tx = event_handle(adapter, &adapter->tx_eq, false);
1885                 for_all_rx_queues(adapter, rxo, i) {
1886                         if (event_peek(&rxo->rx_eq))
1887                                 rx |= event_handle(adapter, &rxo->rx_eq, true);
1888                 }
1889
1890                 if (!(tx || rx))
1891                         return IRQ_NONE;
1892
1893         } else {
1894                 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1895                         (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1896                 if (!isr)
1897                         return IRQ_NONE;
1898
1899                 if ((1 << adapter->tx_eq.eq_idx & isr))
1900                         event_handle(adapter, &adapter->tx_eq, false);
1901
1902                 for_all_rx_queues(adapter, rxo, i) {
1903                         if ((1 << rxo->rx_eq.eq_idx & isr))
1904                                 event_handle(adapter, &rxo->rx_eq, true);
1905                 }
1906         }
1907
1908         return IRQ_HANDLED;
1909 }
1910
1911 static irqreturn_t be_msix_rx(int irq, void *dev)
1912 {
1913         struct be_rx_obj *rxo = dev;
1914         struct be_adapter *adapter = rxo->adapter;
1915
1916         event_handle(adapter, &rxo->rx_eq, true);
1917
1918         return IRQ_HANDLED;
1919 }
1920
1921 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1922 {
1923         struct be_adapter *adapter = dev;
1924
1925         event_handle(adapter, &adapter->tx_eq, false);
1926
1927         return IRQ_HANDLED;
1928 }
1929
1930 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1931 {
1932         return rxcp->tcpf && !rxcp->err;
1933 }
1934
1935 static int be_poll_rx(struct napi_struct *napi, int budget)
1936 {
1937         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1938         struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1939         struct be_adapter *adapter = rxo->adapter;
1940         struct be_queue_info *rx_cq = &rxo->cq;
1941         struct be_rx_compl_info *rxcp;
1942         u32 work_done;
1943
1944         rxo->stats.rx_polls++;
1945         for (work_done = 0; work_done < budget; work_done++) {
1946                 rxcp = be_rx_compl_get(rxo);
1947                 if (!rxcp)
1948                         break;
1949
1950                 /* Ignore flush completions */
1951                 if (rxcp->num_rcvd && rxcp->pkt_size) {
1952                         if (do_gro(rxcp))
1953                                 be_rx_compl_process_gro(adapter, rxo, rxcp);
1954                         else
1955                                 be_rx_compl_process(adapter, rxo, rxcp);
1956                 } else if (rxcp->pkt_size == 0) {
1957                         be_rx_compl_discard(adapter, rxo, rxcp);
1958                 }
1959
1960                 be_rx_stats_update(rxo, rxcp);
1961         }
1962
1963         /* Refill the queue */
1964         if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1965                 be_post_rx_frags(rxo, GFP_ATOMIC);
1966
1967         /* All consumed */
1968         if (work_done < budget) {
1969                 napi_complete(napi);
1970                 be_cq_notify(adapter, rx_cq->id, true, work_done);
1971         } else {
1972                 /* More to be consumed; continue with interrupts disabled */
1973                 be_cq_notify(adapter, rx_cq->id, false, work_done);
1974         }
1975         return work_done;
1976 }
1977
1978 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1979  * For TX/MCC we don't honour the budget; consume everything.
1980  */
1981 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1982 {
1983         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1984         struct be_adapter *adapter =
1985                 container_of(tx_eq, struct be_adapter, tx_eq);
1986         struct be_tx_obj *txo;
1987         struct be_eth_tx_compl *txcp;
1988         int tx_compl, mcc_compl, status = 0;
1989         u8 i;
1990         u16 num_wrbs;
1991
1992         for_all_tx_queues(adapter, txo, i) {
1993                 tx_compl = 0;
1994                 num_wrbs = 0;
1995                 while ((txcp = be_tx_compl_get(&txo->cq))) {
1996                         num_wrbs += be_tx_compl_process(adapter, txo,
1997                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
1998                                         wrb_index, txcp));
1999                         tx_compl++;
2000                 }
2001                 if (tx_compl) {
2002                         be_cq_notify(adapter, txo->cq.id, true, tx_compl);
2003
2004                         atomic_sub(num_wrbs, &txo->q.used);
2005
2006                         /* As Tx wrbs have been freed up, wake up netdev queue
2007                          * if it was stopped due to lack of tx wrbs.  */
2008                         if (__netif_subqueue_stopped(adapter->netdev, i) &&
2009                                 atomic_read(&txo->q.used) < txo->q.len / 2) {
2010                                 netif_wake_subqueue(adapter->netdev, i);
2011                         }
2012
2013                         adapter->drv_stats.be_tx_events++;
2014                         txo->stats.be_tx_compl += tx_compl;
2015                 }
2016         }
2017
2018         mcc_compl = be_process_mcc(adapter, &status);
2019
2020         if (mcc_compl) {
2021                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2022                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
2023         }
2024
2025         napi_complete(napi);
2026
2027         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2028         return 1;
2029 }
2030
2031 void be_detect_dump_ue(struct be_adapter *adapter)
2032 {
2033         u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
2034         u32 i;
2035
2036         pci_read_config_dword(adapter->pdev,
2037                                 PCICFG_UE_STATUS_LOW, &ue_status_lo);
2038         pci_read_config_dword(adapter->pdev,
2039                                 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
2040         pci_read_config_dword(adapter->pdev,
2041                                 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
2042         pci_read_config_dword(adapter->pdev,
2043                                 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
2044
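             /* Mask off expected bits; anything still set is a genuine
              * unrecoverable error */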
2045         ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
2046         ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
2047
2048         if (ue_status_lo || ue_status_hi) {
2049                 adapter->ue_detected = true;
2050                 adapter->eeh_err = true;
2051                 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
2052         }
2053
2054         if (ue_status_lo) {
2055                 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
2056                         if (ue_status_lo & 1)
2057                                 dev_err(&adapter->pdev->dev,
2058                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2059                 }
2060         }
2061         if (ue_status_hi) {
2062                 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
2063                         if (ue_status_hi & 1)
2064                                 dev_err(&adapter->pdev->dev,
2065                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2066                 }
2067         }
2068
2069 }
2070
2071 static void be_worker(struct work_struct *work)
2072 {
2073         struct be_adapter *adapter =
2074                 container_of(work, struct be_adapter, work.work);
2075         struct be_rx_obj *rxo;
2076         struct be_tx_obj *txo;
2077         int i;
2078
2079         if (!adapter->ue_detected && !lancer_chip(adapter))
2080                 be_detect_dump_ue(adapter);
2081
2082         /* When interrupts are not yet enabled, just reap any pending
2083          * mcc completions */
2084         if (!netif_running(adapter->netdev)) {
2085                 int mcc_compl, status = 0;
2086
2087                 mcc_compl = be_process_mcc(adapter, &status);
2088
2089                 if (mcc_compl) {
2090                         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2091                         be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
2092                 }
2093
2094                 goto reschedule;
2095         }
2096
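             /* Don't fire a new stats cmd while a previous one is in flight */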
2097         if (!adapter->stats_cmd_sent) {
2098                 if (lancer_chip(adapter))
2099                         lancer_cmd_get_pport_stats(adapter,
2100                                                 &adapter->stats_cmd);
2101                 else
2102                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
2103         }
2104
2105         for_all_tx_queues(adapter, txo, i)
2106                 be_tx_rate_update(txo);
2107
2108         for_all_rx_queues(adapter, rxo, i) {
2109                 be_rx_rate_update(rxo);
2110                 be_rx_eqd_update(adapter, rxo);
2111
2112                 if (rxo->rx_post_starved) {
2113                         rxo->rx_post_starved = false;
2114                         be_post_rx_frags(rxo, GFP_KERNEL);
2115                 }
2116         }
2117
2118 reschedule:
2119         adapter->work_counter++;
2120         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2121 }
2122
2123 static void be_msix_disable(struct be_adapter *adapter)
2124 {
2125         if (msix_enabled(adapter)) {
2126                 pci_disable_msix(adapter->pdev);
2127                 adapter->num_msix_vec = 0;
2128         }
2129 }
2130
2131 static void be_msix_enable(struct be_adapter *adapter)
2132 {
2133 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
2134         int i, status, num_vec;
2135
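             /* One vector per rxq plus one vector shared by TX and MCC */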
2136         num_vec = be_num_rxqs_want(adapter) + 1;
2137
2138         for (i = 0; i < num_vec; i++)
2139                 adapter->msix_entries[i].entry = i;
2140
2141         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2142         if (status == 0) {
2143                 goto done;
2144         } else if (status >= BE_MIN_MSIX_VECTORS) {
2145                 num_vec = status;
2146                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2147                                 num_vec) == 0)
2148                         goto done;
2149         }
2150         return;
2151 done:
2152         adapter->num_msix_vec = num_vec;
2153         return;
2154 }
2155
2156 static void be_sriov_enable(struct be_adapter *adapter)
2157 {
2158         be_check_sriov_fn_type(adapter);
2159 #ifdef CONFIG_PCI_IOV
2160         if (be_physfn(adapter) && num_vfs) {
2161                 int status, pos;
2162                 u16 nvfs;
2163
2164                 pos = pci_find_ext_capability(adapter->pdev,
2165                                                 PCI_EXT_CAP_ID_SRIOV);
2166                 pci_read_config_word(adapter->pdev,
2167                                         pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2168
2169                 if (num_vfs > nvfs) {
2170                         dev_info(&adapter->pdev->dev,
2171                                         "Device supports only %d VFs, not %d\n",
2172                                         nvfs, num_vfs);
2173                         num_vfs = nvfs;
2174                 }
2175
2176                 status = pci_enable_sriov(adapter->pdev, num_vfs);
2177                 adapter->sriov_enabled = !status;
2178         }
2179 #endif
2180 }
2181
2182 static void be_sriov_disable(struct be_adapter *adapter)
2183 {
2184 #ifdef CONFIG_PCI_IOV
2185         if (adapter->sriov_enabled) {
2186                 pci_disable_sriov(adapter->pdev);
2187                 adapter->sriov_enabled = false;
2188         }
2189 #endif
2190 }
2191
2192 static inline int be_msix_vec_get(struct be_adapter *adapter,
2193                                         struct be_eq_obj *eq_obj)
2194 {
2195         return adapter->msix_entries[eq_obj->eq_idx].vector;
2196 }
2197
2198 static int be_request_irq(struct be_adapter *adapter,
2199                 struct be_eq_obj *eq_obj,
2200                 void *handler, char *desc, void *context)
2201 {
2202         struct net_device *netdev = adapter->netdev;
2203         int vec;
2204
2205         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
2206         vec = be_msix_vec_get(adapter, eq_obj);
2207         return request_irq(vec, handler, 0, eq_obj->desc, context);
2208 }
2209
2210 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2211                         void *context)
2212 {
2213         int vec = be_msix_vec_get(adapter, eq_obj);
2214         free_irq(vec, context);
2215 }
2216
2217 static int be_msix_register(struct be_adapter *adapter)
2218 {
2219         struct be_rx_obj *rxo;
2220         int status, i;
2221         char qname[10];
2222
2223         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2224                                 adapter);
2225         if (status)
2226                 goto err;
2227
2228         for_all_rx_queues(adapter, rxo, i) {
2229                 sprintf(qname, "rxq%d", i);
2230                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2231                                 qname, rxo);
2232                 if (status)
2233                         goto err_msix;
2234         }
2235
2236         return 0;
2237
2238 err_msix:
2239         be_free_irq(adapter, &adapter->tx_eq, adapter);
2240
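             /* Unwind the rx IRQs registered so far, in reverse order */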
2241         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2242                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2243
2244 err:
2245         dev_warn(&adapter->pdev->dev,
2246                 "MSIX Request IRQ failed - err %d\n", status);
2247         be_msix_disable(adapter);
2248         return status;
2249 }
2250
2251 static int be_irq_register(struct be_adapter *adapter)
2252 {
2253         struct net_device *netdev = adapter->netdev;
2254         int status;
2255
2256         if (msix_enabled(adapter)) {
2257                 status = be_msix_register(adapter);
2258                 if (status == 0)
2259                         goto done;
2260                 /* INTx is not supported for VF */
2261                 if (!be_physfn(adapter))
2262                         return status;
2263         }
2264
2265         /* INTx */
2266         netdev->irq = adapter->pdev->irq;
2267         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2268                         adapter);
2269         if (status) {
2270                 dev_err(&adapter->pdev->dev,
2271                         "INTx request IRQ failed - err %d\n", status);
2272                 return status;
2273         }
2274 done:
2275         adapter->isr_registered = true;
2276         return 0;
2277 }
2278
2279 static void be_irq_unregister(struct be_adapter *adapter)
2280 {
2281         struct net_device *netdev = adapter->netdev;
2282         struct be_rx_obj *rxo;
2283         int i;
2284
2285         if (!adapter->isr_registered)
2286                 return;
2287
2288         /* INTx */
2289         if (!msix_enabled(adapter)) {
2290                 free_irq(netdev->irq, adapter);
2291                 goto done;
2292         }
2293
2294         /* MSIx */
2295         be_free_irq(adapter, &adapter->tx_eq, adapter);
2296
2297         for_all_rx_queues(adapter, rxo, i)
2298                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2299
2300 done:
2301         adapter->isr_registered = false;
2302 }
2303
2304 static int be_close(struct net_device *netdev)
2305 {
2306         struct be_adapter *adapter = netdev_priv(netdev);
2307         struct be_rx_obj *rxo;
2308         struct be_tx_obj *txo;
2309         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2310         int vec, i;
2311
2312         be_async_mcc_disable(adapter);
2313
2314         netif_carrier_off(netdev);
2315         adapter->link_up = false;
2316
2317         if (!lancer_chip(adapter))
2318                 be_intr_set(adapter, false);
2319
2320         for_all_rx_queues(adapter, rxo, i)
2321                 napi_disable(&rxo->rx_eq.napi);
2322
2323         napi_disable(&tx_eq->napi);
2324
2325         if (lancer_chip(adapter)) {
2326                 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2327                 for_all_rx_queues(adapter, rxo, i)
2328                          be_cq_notify(adapter, rxo->cq.id, false, 0);
2329                 for_all_tx_queues(adapter, txo, i)
2330                          be_cq_notify(adapter, txo->cq.id, false, 0);
2331         }
2332
2333         if (msix_enabled(adapter)) {
2334                 vec = be_msix_vec_get(adapter, tx_eq);
2335                 synchronize_irq(vec);
2336
2337                 for_all_rx_queues(adapter, rxo, i) {
2338                         vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2339                         synchronize_irq(vec);
2340                 }
2341         } else {
2342                 synchronize_irq(netdev->irq);
2343         }
2344         be_irq_unregister(adapter);
2345
2346         /* Wait for all pending tx completions to arrive so that
2347          * all tx skbs are freed.
2348          */
2349         for_all_tx_queues(adapter, txo, i)
2350                 be_tx_compl_clean(adapter, txo);
2351
2352         return 0;
2353 }
2354
2355 static int be_open(struct net_device *netdev)
2356 {
2357         struct be_adapter *adapter = netdev_priv(netdev);
2358         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2359         struct be_rx_obj *rxo;
2360         bool link_up;
2361         int status, i;
2362         u8 mac_speed;
2363         u16 link_speed;
2364
2365         for_all_rx_queues(adapter, rxo, i) {
2366                 be_post_rx_frags(rxo, GFP_KERNEL);
2367                 napi_enable(&rxo->rx_eq.napi);
2368         }
2369         napi_enable(&tx_eq->napi);
2370
2371         be_irq_register(adapter);
2372
2373         if (!lancer_chip(adapter))
2374                 be_intr_set(adapter, true);
2375
2376         /* The evt queues are created in unarmed state; arm them */
2377         for_all_rx_queues(adapter, rxo, i) {
2378                 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2379                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2380         }
2381         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2382
2383         /* Now that interrupts are on we can process async mcc */
2384         be_async_mcc_enable(adapter);
2385
2386         status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2387                         &link_speed, 0);
2388         if (status)
2389                 goto err;
2390         be_link_status_update(adapter, link_up);
2391
2392         if (be_physfn(adapter)) {
2393                 status = be_vid_config(adapter, false, 0);
2394                 if (status)
2395                         goto err;
2396
2397                 status = be_cmd_set_flow_control(adapter,
2398                                 adapter->tx_fc, adapter->rx_fc);
2399                 if (status)
2400                         goto err;
2401         }
2402
2403         return 0;
2404 err:
2405         be_close(adapter->netdev);
2406         return -EIO;
2407 }
2408
2409 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2410 {
2411         struct be_dma_mem cmd;
2412         int status = 0;
2413         u8 mac[ETH_ALEN];
2414
2415         memset(mac, 0, ETH_ALEN);
2416
2417         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2418         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2419                                     GFP_KERNEL);
2420         if (cmd.va == NULL)
2421                 return -1;
2422         memset(cmd.va, 0, cmd.size);
2423
2424         if (enable) {
2425                 status = pci_write_config_dword(adapter->pdev,
2426                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2427                 if (status) {
2428                         dev_err(&adapter->pdev->dev,
2429                                 "Could not enable Wake-on-LAN\n");
2430                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2431                                           cmd.dma);
2432                         return status;
2433                 }
2434                 status = be_cmd_enable_magic_wol(adapter,
2435                                 adapter->netdev->dev_addr, &cmd);
2436                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2437                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2438         } else {
2439                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2440                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2441                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2442         }
2443
2444         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2445         return status;
2446 }
2447
2448 /*
2449  * Generate a seed MAC address from the PF MAC address using jhash.
2450  * MAC addresses for the VFs are assigned incrementally, starting from the
2451  * seed. These addresses are programmed into the ASIC by the PF; each VF
2452  * driver queries for its MAC address during probe.
2453  */
2454 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2455 {
2456         u32 vf = 0;
2457         int status = 0;
2458         u8 mac[ETH_ALEN];
2459
2460         be_vf_eth_addr_generate(adapter, mac);
2461
2462         for (vf = 0; vf < num_vfs; vf++) {
2463                 status = be_cmd_pmac_add(adapter, mac,
2464                                         adapter->vf_cfg[vf].vf_if_handle,
2465                                         &adapter->vf_cfg[vf].vf_pmac_id,
2466                                         vf + 1);
2467                 if (status)
2468                         dev_err(&adapter->pdev->dev,
2469                                 "Mac address add failed for VF %d\n", vf);
2470                 else
2471                         memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2472
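                     /* The next VF gets the next consecutive address; only
                      * the last octet is incremented (no carry) */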
2473                 mac[5] += 1;
2474         }
2475         return status;
2476 }
2477
2478 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2479 {
2480         u32 vf;
2481
2482         for (vf = 0; vf < num_vfs; vf++) {
2483                 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2484                         be_cmd_pmac_del(adapter,
2485                                         adapter->vf_cfg[vf].vf_if_handle,
2486                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2487         }
2488 }
2489
2490 static int be_setup(struct be_adapter *adapter)
2491 {
2492         struct net_device *netdev = adapter->netdev;
2493         u32 cap_flags, en_flags, vf = 0;
2494         int status;
2495         u8 mac[ETH_ALEN];
2496
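             /* cap_flags describe everything the interface may do;
              * en_flags is the subset enabled at if_create time */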
2497         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2498                                 BE_IF_FLAGS_BROADCAST |
2499                                 BE_IF_FLAGS_MULTICAST;
2500
2501         if (be_physfn(adapter)) {
2502                 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2503                                 BE_IF_FLAGS_PROMISCUOUS |
2504                                 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2505                 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2506
2507                 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2508                         cap_flags |= BE_IF_FLAGS_RSS;
2509                         en_flags |= BE_IF_FLAGS_RSS;
2510                 }
2511         }
2512
2513         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2514                         netdev->dev_addr, false/* pmac_invalid */,
2515                         &adapter->if_handle, &adapter->pmac_id, 0);
2516         if (status != 0)
2517                 goto do_none;
2518
2519         if (be_physfn(adapter)) {
2520                 if (adapter->sriov_enabled) {
2521                         while (vf < num_vfs) {
2522                                 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2523                                                         BE_IF_FLAGS_BROADCAST;
2524                                 status = be_cmd_if_create(adapter, cap_flags,
2525                                         en_flags, mac, true,
2526                                         &adapter->vf_cfg[vf].vf_if_handle,
2527                                         NULL, vf+1);
2528                                 if (status) {
2529                                         dev_err(&adapter->pdev->dev,
2530                                         "Interface Create failed for VF %d\n",
2531                                         vf);
2532                                         goto if_destroy;
2533                                 }
2534                                 adapter->vf_cfg[vf].vf_pmac_id =
2535                                                         BE_INVALID_PMAC_ID;
2536                                 vf++;
2537                         }
2538                 }
2539         } else {
2540                 status = be_cmd_mac_addr_query(adapter, mac,
2541                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2542                 if (!status) {
2543                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2544                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2545                 }
2546         }
2547
2548         status = be_tx_queues_create(adapter);
2549         if (status != 0)
2550                 goto if_destroy;
2551
2552         status = be_rx_queues_create(adapter);
2553         if (status != 0)
2554                 goto tx_qs_destroy;
2555
2556         status = be_mcc_queues_create(adapter);
2557         if (status != 0)
2558                 goto rx_qs_destroy;
2559
2560         adapter->link_speed = -1;
2561
2562         return 0;
2563
2564 rx_qs_destroy:
2565         be_rx_queues_destroy(adapter);
2566 tx_qs_destroy:
2567         be_tx_queues_destroy(adapter);
2568 if_destroy:
2569         if (be_physfn(adapter) && adapter->sriov_enabled)
2570                 for (vf = 0; vf < num_vfs; vf++)
2571                         if (adapter->vf_cfg[vf].vf_if_handle)
2572                                 be_cmd_if_destroy(adapter,
2573                                         adapter->vf_cfg[vf].vf_if_handle,
2574                                         vf + 1);
2575         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2576 do_none:
2577         return status;
2578 }
2579
2580 static int be_clear(struct be_adapter *adapter)
2581 {
2582         int vf;
2583
2584         if (be_physfn(adapter) && adapter->sriov_enabled)
2585                 be_vf_eth_addr_rem(adapter);
2586
2587         be_mcc_queues_destroy(adapter);
2588         be_rx_queues_destroy(adapter);
2589         be_tx_queues_destroy(adapter);
2590         adapter->eq_next_idx = 0;
2591
2592         if (be_physfn(adapter) && adapter->sriov_enabled)
2593                 for (vf = 0; vf < num_vfs; vf++)
2594                         if (adapter->vf_cfg[vf].vf_if_handle)
2595                                 be_cmd_if_destroy(adapter,
2596                                         adapter->vf_cfg[vf].vf_if_handle,
2597                                         vf + 1);
2598
2599         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2600
2601         /* tell fw we're done with firing cmds */
2602         be_cmd_fw_clean(adapter);
2603         return 0;
2604 }
2605
2606
2607 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2608 static bool be_flash_redboot(struct be_adapter *adapter,
2609                         const u8 *p, u32 img_start, int image_size,
2610                         int hdr_size)
2611 {
2612         u32 crc_offset;
2613         u8 flashed_crc[4];
2614         int status;
2615
2616         crc_offset = hdr_size + img_start + image_size - 4;
2617
2618         p += crc_offset;
2619
2620         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2621                         (image_size - 4));
2622         if (status) {
2623                 dev_err(&adapter->pdev->dev,
2624                 "could not get crc from flash, not flashing redboot\n");
2625                 return false;
2626         }
2627
2628         /* update redboot only if the flashed crc does not match the image's */
2629         return memcmp(flashed_crc, p, 4) != 0;
2633 }
2634
2635 static int be_flash_data(struct be_adapter *adapter,
2636                         const struct firmware *fw,
2637                         struct be_dma_mem *flash_cmd, int num_of_images)
2638
2639 {
2640         int status = 0, i, filehdr_size = 0;
2641         u32 total_bytes = 0, flash_op;
2642         int num_bytes;
2643         const u8 *p = fw->data;
2644         struct be_cmd_write_flashrom *req = flash_cmd->va;
2645         const struct flash_comp *pflashcomp;
2646         int num_comp;
2647
2648         static const struct flash_comp gen3_flash_types[9] = {
2649                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2650                         FLASH_IMAGE_MAX_SIZE_g3},
2651                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2652                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2653                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2654                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2655                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2656                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2657                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2658                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2659                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2660                         FLASH_IMAGE_MAX_SIZE_g3},
2661                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2662                         FLASH_IMAGE_MAX_SIZE_g3},
2663                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2664                         FLASH_IMAGE_MAX_SIZE_g3},
2665                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2666                         FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2667         };
2668         static const struct flash_comp gen2_flash_types[8] = {
2669                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2670                         FLASH_IMAGE_MAX_SIZE_g2},
2671                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2672                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2673                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2674                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2675                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2676                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2677                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2678                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2679                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2680                         FLASH_IMAGE_MAX_SIZE_g2},
2681                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2682                         FLASH_IMAGE_MAX_SIZE_g2},
2683                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2684                          FLASH_IMAGE_MAX_SIZE_g2}
2685         };
2686
2687         if (adapter->generation == BE_GEN3) {
2688                 pflashcomp = gen3_flash_types;
2689                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2690                 num_comp = ARRAY_SIZE(gen3_flash_types);
2691         } else {
2692                 pflashcomp = gen2_flash_types;
2693                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2694                 num_comp = ARRAY_SIZE(gen2_flash_types);
2695         }
2696         for (i = 0; i < num_comp; i++) {
2697                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2698                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2699                         continue;
2700                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2701                         (!be_flash_redboot(adapter, fw->data,
2702                         pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2703                         (num_of_images * sizeof(struct image_hdr)))))
2704                         continue;
2705                 p = fw->data;
2706                 p += filehdr_size + pflashcomp[i].offset
2707                         + (num_of_images * sizeof(struct image_hdr));
2708                 if (p + pflashcomp[i].size > fw->data + fw->size)
2709                         return -1;
2710                 total_bytes = pflashcomp[i].size;
2711                 while (total_bytes) {
2712                         if (total_bytes > 32*1024)
2713                                 num_bytes = 32*1024;
2714                         else
2715                                 num_bytes = total_bytes;
2716                         total_bytes -= num_bytes;
2717
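                             /* Intermediate chunks are only staged (SAVE);
                              * the final chunk triggers the actual flash */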
2718                         if (!total_bytes)
2719                                 flash_op = FLASHROM_OPER_FLASH;
2720                         else
2721                                 flash_op = FLASHROM_OPER_SAVE;
2722                         memcpy(req->params.data_buf, p, num_bytes);
2723                         p += num_bytes;
2724                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2725                                 pflashcomp[i].optype, flash_op, num_bytes);
2726                         if (status) {
2727                                 dev_err(&adapter->pdev->dev,
2728                                         "cmd to write to flash rom failed.\n");
2729                                 return -1;
2730                         }
2731                 }
2732         }
2733         return 0;
2734 }
2735
2736 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2737 {
2738         if (fhdr == NULL)
2739                 return 0;
2740         if (fhdr->build[0] == '3')
2741                 return BE_GEN3;
2742         else if (fhdr->build[0] == '2')
2743                 return BE_GEN2;
2744         else
2745                 return 0;
2746 }
2747
2748 static int lancer_fw_download(struct be_adapter *adapter,
2749                                 const struct firmware *fw)
2750 {
2751 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
2752 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
2753         struct be_dma_mem flash_cmd;
2754         const u8 *data_ptr = NULL;
2755         u8 *dest_image_ptr = NULL;
2756         size_t image_size = 0;
2757         u32 chunk_size = 0;
2758         u32 data_written = 0;
2759         u32 offset = 0;
2760         int status = 0;
2761         u8 add_status = 0;
2762
2763         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2764                 dev_err(&adapter->pdev->dev,
2765                         "FW image not properly aligned. "
2766                         "Length must be 4-byte aligned.\n");
2767                 status = -EINVAL;
2768                 goto lancer_fw_exit;
2769         }
2770
2771         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2772                                 + LANCER_FW_DOWNLOAD_CHUNK;
2773         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2774                                                 &flash_cmd.dma, GFP_KERNEL);
2775         if (!flash_cmd.va) {
2776                 status = -ENOMEM;
2777                 dev_err(&adapter->pdev->dev,
2778                         "Memory allocation failure while flashing\n");
2779                 goto lancer_fw_exit;
2780         }
2781
2782         dest_image_ptr = flash_cmd.va +
2783                                 sizeof(struct lancer_cmd_req_write_object);
2784         image_size = fw->size;
2785         data_ptr = fw->data;
2786
2787         while (image_size) {
2788                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2789
2790                 /* Copy the image chunk content. */
2791                 memcpy(dest_image_ptr, data_ptr, chunk_size);
2792
2793                 status = lancer_cmd_write_object(adapter, &flash_cmd,
2794                                 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2795                                 &data_written, &add_status);
2796
2797                 if (status)
2798                         break;
2799
2800                 offset += data_written;
2801                 data_ptr += data_written;
2802                 image_size -= data_written;
2803         }
2804
2805         if (!status) {
2806                 /* Commit the FW written */
2807                 status = lancer_cmd_write_object(adapter, &flash_cmd,
2808                                         0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2809                                         &data_written, &add_status);
2810         }
2811
2812         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2813                                 flash_cmd.dma);
2814         if (status) {
2815                 dev_err(&adapter->pdev->dev,
2816                         "Firmware load error. "
2817                         "Status code: 0x%x Additional Status: 0x%x\n",
2818                         status, add_status);
2819                 goto lancer_fw_exit;
2820         }
2821
2822         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2823 lancer_fw_exit:
2824         return status;
2825 }
2826
2827 static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
2828 {
2829         struct flash_file_hdr_g2 *fhdr;
2830         struct flash_file_hdr_g3 *fhdr3;
2831         struct image_hdr *img_hdr_ptr = NULL;
2832         struct be_dma_mem flash_cmd;
2833         const u8 *p;
2834         int status = 0, i = 0, num_imgs = 0;
2835
2836         p = fw->data;
2837         fhdr = (struct flash_file_hdr_g2 *) p;
2838
2839         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32 * 1024;
2840         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2841                                           &flash_cmd.dma, GFP_KERNEL);
2842         if (!flash_cmd.va) {
2843                 status = -ENOMEM;
2844                 dev_err(&adapter->pdev->dev,
2845                         "Memory allocation failure while flashing\n");
2846                 goto be_fw_exit;
2847         }
2848
2849         if ((adapter->generation == BE_GEN3) &&
2850                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2851                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2852                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2853                 for (i = 0; i < num_imgs; i++) {
2854                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2855                                         (sizeof(struct flash_file_hdr_g3) +
2856                                          i * sizeof(struct image_hdr)));
2857                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2858                                 status = be_flash_data(adapter, fw, &flash_cmd,
2859                                                         num_imgs);
2860                 }
2861         } else if ((adapter->generation == BE_GEN2) &&
2862                         (get_ufigen_type(fhdr) == BE_GEN2)) {
2863                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2864         } else {
2865                 dev_err(&adapter->pdev->dev,
2866                         "UFI and Interface are not compatible for flashing\n");
2867                 status = -EINVAL;
2868         }
2869
2870         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2871                           flash_cmd.dma);
2872         if (status) {
2873                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2874                 goto be_fw_exit;
2875         }
2876
2877         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2878
2879 be_fw_exit:
2880         return status;
2881 }
2882
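/* Illustrative, self-contained sketch (not driver code) of how
 * be_fw_download() above locates each per-image header: the image
 * headers sit back-to-back right after the file header, so entry i
 * lives at base + sizeof(file header) + i * sizeof(image header).
 * These struct layouts are hypothetical stand-ins, not the real
 * flash_file_hdr_g3/image_hdr definitions.
 */
#include <stdint.h>
#include <stdio.h>

struct file_hdr { uint32_t num_imgs; uint32_t rsvd[3]; };
struct img_hdr  { uint32_t imageid;  uint32_t size; };

union blob {
        struct {
                struct file_hdr fh;
                struct img_hdr ih[2];
        } s;
        uint8_t bytes[sizeof(struct file_hdr) + 2 * sizeof(struct img_hdr)];
};

static void walk_images(const uint8_t *base)
{
        const struct file_hdr *fhdr = (const struct file_hdr *)base;
        uint32_t i;

        for (i = 0; i < fhdr->num_imgs; i++) {
                const struct img_hdr *ih = (const struct img_hdr *)
                        (base + sizeof(*fhdr) + i * sizeof(*ih));
                printf("image %u: id=%u size=%u\n", (unsigned)i,
                       (unsigned)ih->imageid, (unsigned)ih->size);
        }
}

int main(void)
{
        union blob b = { .s = { .fh = { .num_imgs = 2 },
                                .ih = { { 1, 100 }, { 2, 200 } } } };

        walk_images(b.bytes);           /* union keeps the cast aligned */
        return 0;
}
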
2883 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2884 {
2885         const struct firmware *fw;
2886         int status;
2887
2888         if (!netif_running(adapter->netdev)) {
2889                 dev_err(&adapter->pdev->dev,
2890                         "Firmware load not allowed (interface is down)\n");
2891                 return -ENETDOWN;
2892         }
2893
2894         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2895         if (status)
2896                 goto fw_exit;
2897
2898         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2899
2900         if (lancer_chip(adapter))
2901                 status = lancer_fw_download(adapter, fw);
2902         else
2903                 status = be_fw_download(adapter, fw);
2904
2905 fw_exit:
2906         release_firmware(fw);
2907         return status;
2908 }
2909
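/* Minimal kernel-style sketch (hypothetical foo_* names, shape only) of
 * the request_firmware() lifecycle that be_load_fw() above follows: the
 * firmware core hands back a read-only blob (fw->data, fw->size) that
 * must always be balanced with release_firmware().
 */
static int foo_load_blob(struct device *dev)
{
        const struct firmware *fw;
        int status;

        status = request_firmware(&fw, "foo_fw.bin", dev);
        if (status)
                return status;          /* blob missing or load failed */

        /* fw->data and fw->size stay valid until release_firmware() */
        release_firmware(fw);
        return 0;
}
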
2910 static struct net_device_ops be_netdev_ops = {
2911         .ndo_open               = be_open,
2912         .ndo_stop               = be_close,
2913         .ndo_start_xmit         = be_xmit,
2914         .ndo_set_rx_mode        = be_set_multicast_list,
2915         .ndo_set_mac_address    = be_mac_addr_set,
2916         .ndo_change_mtu         = be_change_mtu,
2917         .ndo_validate_addr      = eth_validate_addr,
2918         .ndo_vlan_rx_register   = be_vlan_register,
2919         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
2920         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
2921         .ndo_set_vf_mac         = be_set_vf_mac,
2922         .ndo_set_vf_vlan        = be_set_vf_vlan,
2923         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
2924         .ndo_get_vf_config      = be_get_vf_config
2925 };
2926
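/* Illustrative sketch (hypothetical foo_* names, not driver code) of the
 * minimal shape of a net_device_ops instance like be_netdev_ops above:
 * each .ndo_* member is a callback the net core invokes, e.g. on ifup
 * or for every skb handed down for transmit.
 */
static int foo_open(struct net_device *dev)
{
        netif_start_queue(dev);         /* let the stack transmit */
        return 0;
}

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
        dev_kfree_skb_any(skb);         /* a real driver would DMA it */
        return NETDEV_TX_OK;
}

static const struct net_device_ops foo_netdev_ops = {
        .ndo_open       = foo_open,
        .ndo_start_xmit = foo_xmit,
};
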
2927 static void be_netdev_init(struct net_device *netdev)
2928 {
2929         struct be_adapter *adapter = netdev_priv(netdev);
2930         struct be_rx_obj *rxo;
2931         int i;
2932
2933         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2934                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2935                 NETIF_F_HW_VLAN_TX;
2936         if (be_multi_rxq(adapter))
2937                 netdev->hw_features |= NETIF_F_RXHASH;
2938
2939         netdev->features |= netdev->hw_features |
2940                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
2941
2942         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2943                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2944
2945         netdev->flags |= IFF_MULTICAST;
2946
2947         /* Default settings for Rx and Tx flow control */
2948         adapter->rx_fc = true;
2949         adapter->tx_fc = true;
2950
2951         netif_set_gso_max_size(netdev, 65535);
2952
2953         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2954
2955         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2956
2957         for_all_rx_queues(adapter, rxo, i)
2958                 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2959                                 BE_NAPI_WEIGHT);
2960
2961         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2962                 BE_NAPI_WEIGHT);
2963 }
2964
2965 static void be_unmap_pci_bars(struct be_adapter *adapter)
2966 {
2967         if (adapter->csr)
2968                 iounmap(adapter->csr);
2969         if (adapter->db)
2970                 iounmap(adapter->db);
2971         if (adapter->pcicfg && be_physfn(adapter))
2972                 iounmap(adapter->pcicfg);
2973 }
2974
2975 static int be_map_pci_bars(struct be_adapter *adapter)
2976 {
2977         u8 __iomem *addr;
2978         int pcicfg_reg, db_reg;
2979
2980         if (lancer_chip(adapter)) {
2981                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2982                         pci_resource_len(adapter->pdev, 0));
2983                 if (addr == NULL)
2984                         return -ENOMEM;
2985                 adapter->db = addr;
2986                 return 0;
2987         }
2988
2989         if (be_physfn(adapter)) {
2990                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2991                                 pci_resource_len(adapter->pdev, 2));
2992                 if (addr == NULL)
2993                         return -ENOMEM;
2994                 adapter->csr = addr;
2995         }
2996
2997         if (adapter->generation == BE_GEN2) {
2998                 pcicfg_reg = 1;                 /* BE2: pcicfg lives in BAR 1 */
2999                 db_reg = 4;                     /* doorbells live in BAR 4 */
3000         } else {
3001                 pcicfg_reg = 0;                 /* BE3: pcicfg lives in BAR 0 */
3002                 if (be_physfn(adapter))
3003                         db_reg = 4;             /* PF doorbells in BAR 4 */
3004                 else
3005                         db_reg = 0;             /* VF doorbells in BAR 0 */
3006         }
3007         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3008                                 pci_resource_len(adapter->pdev, db_reg));
3009         if (addr == NULL)
3010                 goto pci_map_err;
3011         adapter->db = addr;
3012
3013         if (be_physfn(adapter)) {
3014                 addr = ioremap_nocache(
3015                                 pci_resource_start(adapter->pdev, pcicfg_reg),
3016                                 pci_resource_len(adapter->pdev, pcicfg_reg));
3017                 if (addr == NULL)
3018                         goto pci_map_err;
3019                 adapter->pcicfg = addr;
3020         } else
3021                 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
3022
3023         return 0;
3024 pci_map_err:
3025         be_unmap_pci_bars(adapter);
3026         return -ENOMEM;
3027 }
3028
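/* Kernel-style sketch (hypothetical foo_* name, shape only) of the BAR
 * mapping pattern used in be_map_pci_bars() above: look the region up
 * with pci_resource_start()/pci_resource_len(), map it uncached with
 * ioremap_nocache(), and balance every successful map with iounmap()
 * on the error and teardown paths, as be_unmap_pci_bars() does.
 */
static void __iomem *foo_map_bar(struct pci_dev *pdev, int bar)
{
        return ioremap_nocache(pci_resource_start(pdev, bar),
                               pci_resource_len(pdev, bar));
}
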
3030 static void be_ctrl_cleanup(struct be_adapter *adapter)
3031 {
3032         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3033
3034         be_unmap_pci_bars(adapter);
3035
3036         if (mem->va)
3037                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3038                                   mem->dma);
3039
3040         mem = &adapter->mc_cmd_mem;
3041         if (mem->va)
3042                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3043                                   mem->dma);
3044 }
3045
3046 static int be_ctrl_init(struct be_adapter *adapter)
3047 {
3048         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3049         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3050         struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
3051         int status;
3052
3053         status = be_map_pci_bars(adapter);
3054         if (status)
3055                 goto done;
3056
3057         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3058         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3059                                                 mbox_mem_alloc->size,
3060                                                 &mbox_mem_alloc->dma,
3061                                                 GFP_KERNEL);
3062         if (!mbox_mem_alloc->va) {
3063                 status = -ENOMEM;
3064                 goto unmap_pci_bars;
3065         }
3066
3067         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3068         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3069         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3070         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3071
3072         mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
3073         mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
3074                                             mc_cmd_mem->size, &mc_cmd_mem->dma,
3075                                             GFP_KERNEL);
3076         if (mc_cmd_mem->va == NULL) {
3077                 status = -ENOMEM;
3078                 goto free_mbox;
3079         }
3080         memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
3081
3082         mutex_init(&adapter->mbox_lock);
3083         spin_lock_init(&adapter->mcc_lock);
3084         spin_lock_init(&adapter->mcc_cq_lock);
3085
3086         init_completion(&adapter->flash_compl);
3087         pci_save_state(adapter->pdev);
3088         return 0;
3089
3090 free_mbox:
3091         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3092                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3093
3094 unmap_pci_bars:
3095         be_unmap_pci_bars(adapter);
3096
3097 done:
3098         return status;
3099 }
3100
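/* Self-contained sketch (not driver code) of the alignment trick
 * be_ctrl_init() above uses for the mailbox: over-allocate by the
 * alignment amount, then round the pointer up, so a 16-byte-aligned
 * view always fits inside the allocation.  ALIGN_UP mirrors the
 * kernel's PTR_ALIGN.
 */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define ALIGN_UP(p, a) \
        ((void *)(((uintptr_t)(p) + (a) - 1) & ~(uintptr_t)((a) - 1)))

int main(void)
{
        size_t size = 100;              /* payload, e.g. the mailbox */
        void *raw = malloc(size + 16);  /* worst-case alignment slack */
        void *aligned;

        if (!raw)
                return 1;
        aligned = ALIGN_UP(raw, 16);
        assert(((uintptr_t)aligned & 15) == 0);         /* aligned... */
        assert((char *)aligned + size <=
               (char *)raw + size + 16);                /* ...and in bounds */
        free(raw);
        return 0;
}
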
3101 static void be_stats_cleanup(struct be_adapter *adapter)
3102 {
3103         struct be_dma_mem *cmd = &adapter->stats_cmd;
3104
3105         if (cmd->va)
3106                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3107                                   cmd->va, cmd->dma);
3108 }
3109
3110 static int be_stats_init(struct be_adapter *adapter)
3111 {
3112         struct be_dma_mem *cmd = &adapter->stats_cmd;
3113
3114         if (adapter->generation == BE_GEN2) {
3115                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3116         } else {
3117                 if (lancer_chip(adapter))
3118                         cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3119                 else
3120                         cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3121         }
3122         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3123                                      GFP_KERNEL);
3124         if (cmd->va == NULL)
3125                 return -ENOMEM;
3126         memset(cmd->va, 0, cmd->size);
3127         return 0;
3128 }
3129
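/* Teardown mirrors be_probe() in reverse: stop deferred work, detach
 * from the net core, free queue/stats/control state, then disable
 * MSI-X and release the PCI device itself.
 */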
3130 static void __devexit be_remove(struct pci_dev *pdev)
3131 {
3132         struct be_adapter *adapter = pci_get_drvdata(pdev);
3133
3134         if (!adapter)
3135                 return;
3136
3137         cancel_delayed_work_sync(&adapter->work);
3138
3139         unregister_netdev(adapter->netdev);
3140
3141         be_clear(adapter);
3142
3143         be_stats_cleanup(adapter);
3144
3145         be_ctrl_cleanup(adapter);
3146
3147         kfree(adapter->vf_cfg);
3148         be_sriov_disable(adapter);
3149
3150         be_msix_disable(adapter);
3151
3152         pci_set_drvdata(pdev, NULL);
3153         pci_release_regions(pdev);
3154         pci_disable_device(pdev);
3155
3156         free_netdev(adapter->netdev);
3157 }
3158
3159 static int be_get_config(struct be_adapter *adapter)
3160 {
3161         int status;
3162         u8 mac[ETH_ALEN];
3163
3164         status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
3165         if (status)
3166                 return status;
3167
3168         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3169                         &adapter->function_mode, &adapter->function_caps);
3170         if (status)
3171                 return status;
3172
3173         memset(mac, 0, ETH_ALEN);
3174
3175         /* A default permanent address is given to each VF for Lancer */
3176         if (be_physfn(adapter) || lancer_chip(adapter)) {
3177                 status = be_cmd_mac_addr_query(adapter, mac,
3178                         MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
3179
3180                 if (status)
3181                         return status;
3182
3183                 if (!is_valid_ether_addr(mac))
3184                         return -EADDRNOTAVAIL;
3185
3186                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3187                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3188         }
3189
3190         if (adapter->function_mode & 0x400)     /* FLEX10 (multi-channel) mode */
3191                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED / 4;
3192         else
3193                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3194
3195         status = be_cmd_get_cntl_attributes(adapter);
3196         if (status)
3197                 return status;
3198
3199         be_cmd_check_native_mode(adapter);
3200
3201         if ((num_vfs && adapter->sriov_enabled) ||
3202                 (adapter->function_mode & 0x400) ||     /* FLEX10 mode */
3203                 lancer_chip(adapter) || !be_physfn(adapter)) {
3204                 adapter->num_tx_qs = 1;
3205                 netif_set_real_num_tx_queues(adapter->netdev,
3206                         adapter->num_tx_qs);
3207         } else {
3208                 adapter->num_tx_qs = MAX_TX_QS;
3209         }
3210
3211         return 0;
3212 }
3213
3214 static int be_dev_family_check(struct be_adapter *adapter)
3215 {
3216         struct pci_dev *pdev = adapter->pdev;
3217         u32 sli_intf = 0, if_type;
3218
3219         switch (pdev->device) {
3220         case BE_DEVICE_ID1:
3221         case OC_DEVICE_ID1:
3222                 adapter->generation = BE_GEN2;
3223                 break;
3224         case BE_DEVICE_ID2:
3225         case OC_DEVICE_ID2:
3226                 adapter->generation = BE_GEN3;
3227                 break;
3228         case OC_DEVICE_ID3:
3229         case OC_DEVICE_ID4:
3230                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3231                 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3232                                                 SLI_INTF_IF_TYPE_SHIFT;
3233
3234                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3235                         if_type != 0x02) {
3236                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3237                         return -EINVAL;
3238                 }
3239                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3240                                          SLI_INTF_FAMILY_SHIFT);
3241                 adapter->generation = BE_GEN3;
3242                 break;
3243         default:
3244                 adapter->generation = 0;
3245         }
3246         return 0;
3247 }
3248
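/* Self-contained sketch (not driver code) of the mask-and-shift field
 * extraction be_dev_family_check() above applies to the SLI_INTF
 * register.  The mask/shift values here are invented for illustration,
 * not the real SLI_INTF_* constants.
 */
#include <assert.h>
#include <stdint.h>

#define IF_TYPE_MASK    0x0000f000u
#define IF_TYPE_SHIFT   12

static uint32_t get_field(uint32_t reg, uint32_t mask, unsigned int shift)
{
        return (reg & mask) >> shift;   /* isolate, then right-justify */
}

int main(void)
{
        assert(get_field(0x00002abc, IF_TYPE_MASK, IF_TYPE_SHIFT) == 0x2);
        return 0;
}
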
3249 static int lancer_wait_ready(struct be_adapter *adapter)
3250 {
3251 #define SLIPORT_READY_TIMEOUT 500       /* 20ms polls: wait up to ~10s */
3252         u32 sliport_status;
3253         int status = 0, i;
3254
3255         for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3256                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3257                 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3258                         break;
3259
3260                 msleep(20);
3261         }
3262
3263         if (i == SLIPORT_READY_TIMEOUT)
3264                 status = -ETIMEDOUT;
3265
3266         return status;
3267 }
3268
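/* Self-contained sketch (not driver code) of the bounded-poll pattern in
 * lancer_wait_ready() above: poll up to LIMIT times with a delay between
 * attempts; if the loop runs to completion the counter equals LIMIT and
 * that is the timeout signal.  ready_after() is a hypothetical stand-in
 * for reading the SLIPORT status register.
 */
#include <assert.h>

#define LIMIT 500

static int ready_after(int polls) { return polls >= 3; }

static int wait_ready(void)
{
        int i;

        for (i = 0; i < LIMIT; i++) {
                if (ready_after(i))
                        break;          /* device came up */
                /* in kernel context, msleep(20) would go here */
        }
        return (i == LIMIT) ? -1 : 0;   /* -1: timed out */
}

int main(void)
{
        assert(wait_ready() == 0);
        return 0;
}
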
3269 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3270 {
3271         int status;
3272         u32 sliport_status, err, reset_needed;
3273         status = lancer_wait_ready(adapter);
3274         if (!status) {
3275                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3276                 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3277                 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3278                 if (err && reset_needed) {
3279                         iowrite32(SLI_PORT_CONTROL_IP_MASK,
3280                                         adapter->db + SLIPORT_CONTROL_OFFSET);
3281
3282                         /* check adapter has corrected the error */
3283                         status = lancer_wait_ready(adapter);
3284                         sliport_status = ioread32(adapter->db +
3285                                                         SLIPORT_STATUS_OFFSET);
3286                         sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3287                                                 SLIPORT_STATUS_RN_MASK);
3288                         if (status || sliport_status)
3289                                 status = -1;
3290                 } else if (err || reset_needed) {
3291                         status = -1;
3292                 }
3293         }
3294         return status;
3295 }
3296
3297 static int __devinit be_probe(struct pci_dev *pdev,
3298                         const struct pci_device_id *pdev_id)
3299 {
3300         int status = 0;
3301         struct be_adapter *adapter;
3302         struct net_device *netdev;
3303
3304         status = pci_enable_device(pdev);
3305         if (status)
3306                 goto do_none;
3307
3308         status = pci_request_regions(pdev, DRV_NAME);
3309         if (status)
3310                 goto disable_dev;
3311         pci_set_master(pdev);
3312
3313         netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
3314         if (netdev == NULL) {
3315                 status = -ENOMEM;
3316                 goto rel_reg;
3317         }
3318         adapter = netdev_priv(netdev);
3319         adapter->pdev = pdev;
3320         pci_set_drvdata(pdev, adapter);
3321
3322         status = be_dev_family_check(adapter);
3323         if (status)
3324                 goto free_netdev;
3325
3326         adapter->netdev = netdev;
3327         SET_NETDEV_DEV(netdev, &pdev->dev);
3328
3329         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3330         if (!status) {
3331                 netdev->features |= NETIF_F_HIGHDMA;
3332         } else {
3333                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3334                 if (status) {
3335                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3336                         goto free_netdev;
3337                 }
3338         }
3339
3340         be_sriov_enable(adapter);
3341         if (adapter->sriov_enabled) {
3342                 adapter->vf_cfg = kcalloc(num_vfs,
3343                         sizeof(struct be_vf_cfg), GFP_KERNEL);
3344
3345                 if (!adapter->vf_cfg) {
3346                         status = -ENOMEM;
3347                         goto free_netdev;
3348                 }
3347         }
3348
3349         status = be_ctrl_init(adapter);
3350         if (status)
3351                 goto free_vf_cfg;
3352
3353         if (lancer_chip(adapter)) {
3354                 status = lancer_test_and_set_rdy_state(adapter);
3355                 if (status) {
3356                         dev_err(&pdev->dev, "Adapter in non-recoverable error state\n");
3357                         goto ctrl_clean;
3358                 }
3359         }
3360
3361         /* sync up with fw's ready state */
3362         if (be_physfn(adapter)) {
3363                 status = be_cmd_POST(adapter);
3364                 if (status)
3365                         goto ctrl_clean;
3366         }
3367
3368         /* tell fw we're ready to fire cmds */
3369         status = be_cmd_fw_init(adapter);
3370         if (status)
3371                 goto ctrl_clean;
3372
3373         status = be_cmd_reset_function(adapter);
3374         if (status)
3375                 goto ctrl_clean;
3376
3377         status = be_stats_init(adapter);
3378         if (status)
3379                 goto ctrl_clean;
3380
3381         status = be_get_config(adapter);
3382         if (status)
3383                 goto stats_clean;
3384
3385         be_msix_enable(adapter);
3386
3387         INIT_DELAYED_WORK(&adapter->work, be_worker);
3388
3389         status = be_setup(adapter);
3390         if (status)
3391                 goto msix_disable;
3392
3393         be_netdev_init(netdev);
3394         status = register_netdev(netdev);
3395         if (status != 0)
3396                 goto unsetup;
3397         netif_carrier_off(netdev);
3398
3399         if (be_physfn(adapter) && adapter->sriov_enabled) {
3400                 u8 mac_speed;
3401                 bool link_up;
3402                 u16 vf, lnk_speed;
3403
3404                 if (!lancer_chip(adapter)) {
3405                         status = be_vf_eth_addr_config(adapter);
3406                         if (status)
3407                                 goto unreg_netdev;
3408                 }
3409
3410                 for (vf = 0; vf < num_vfs; vf++) {
3411                         status = be_cmd_link_status_query(adapter, &link_up,
3412                                         &mac_speed, &lnk_speed, vf + 1);
3413                         if (!status)
3414                                 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
3415                         else
3416                                 goto unreg_netdev;
3417                 }
3418         }
3419
3420         dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3421         /* By default all priorities are enabled.
3422          * Needed in case of no GRP5 evt support
3423          */
3424         adapter->vlan_prio_bmap = 0xff;
3425
3426         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3427         return 0;
3428
3429 unreg_netdev:
3430         unregister_netdev(netdev);
3431 unsetup:
3432         be_clear(adapter);
3433 msix_disable:
3434         be_msix_disable(adapter);
3435 stats_clean:
3436         be_stats_cleanup(adapter);
3437 ctrl_clean:
3438         be_ctrl_cleanup(adapter);
3439 free_vf_cfg:
3440         kfree(adapter->vf_cfg);
3441 free_netdev:
3442         be_sriov_disable(adapter);
3443         free_netdev(netdev);
3444         pci_set_drvdata(pdev, NULL);
3445 rel_reg:
3446         pci_release_regions(pdev);
3447 disable_dev:
3448         pci_disable_device(pdev);
3449 do_none:
3450         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3451         return status;
3452 }
3453
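/* Self-contained sketch (not driver code) of the goto-unwind ladder that
 * structures be_probe() above: acquire resources top-down, and on any
 * failure jump to the label that releases everything acquired so far, in
 * reverse order.  Heap buffers stand in for the PCI/netdev state; unlike
 * be_probe(), this demo also reuses the ladder as its success teardown.
 */
#include <stdlib.h>

static int probe_like(void)
{
        void *a, *b, *c;
        int status = -1;

        a = malloc(16);
        if (!a)
                goto out;
        b = malloc(16);
        if (!b)
                goto free_a;
        c = malloc(16);
        if (!c)
                goto free_b;

        status = 0;                     /* everything acquired */
        free(c);                        /* demo-only teardown */
free_b:
        free(b);
free_a:
        free(a);
out:
        return status;
}

int main(void)
{
        return probe_like() ? 1 : 0;
}
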
3454 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3455 {
3456         struct be_adapter *adapter = pci_get_drvdata(pdev);
3457         struct net_device *netdev = adapter->netdev;
3458
3459         cancel_delayed_work_sync(&adapter->work);
3460         if (adapter->wol)
3461                 be_setup_wol(adapter, true);
3462
3463         netif_device_detach(netdev);
3464         if (netif_running(netdev)) {
3465                 rtnl_lock();
3466                 be_close(netdev);
3467                 rtnl_unlock();
3468         }
3469         be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
3470         be_clear(adapter);
3471
3472         be_msix_disable(adapter);
3473         pci_save_state(pdev);
3474         pci_disable_device(pdev);
3475         pci_set_power_state(pdev, pci_choose_state(pdev, state));
3476         return 0;
3477 }
3478
3479 static int be_resume(struct pci_dev *pdev)
3480 {
3481         int status = 0;
3482         struct be_adapter *adapter = pci_get_drvdata(pdev);
3483         struct net_device *netdev = adapter->netdev;
3484
3485         netif_device_detach(netdev);
3486
3487         status = pci_enable_device(pdev);
3488         if (status)
3489                 return status;
3490
3491         pci_set_power_state(pdev, PCI_D0);
3492         pci_restore_state(pdev);
3493
3494         be_msix_enable(adapter);
3495         /* tell fw we're ready to fire cmds */
3496         status = be_cmd_fw_init(adapter);
3497         if (status)
3498                 return status;
3499
3500         be_setup(adapter);
3501         if (netif_running(netdev)) {
3502                 rtnl_lock();
3503                 be_open(netdev);
3504                 rtnl_unlock();
3505         }
3506         netif_device_attach(netdev);
3507
3508         if (adapter->wol)
3509                 be_setup_wol(adapter, false);
3510
3511         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3512         return 0;
3513 }
3514
3515 /*
3516  * An FLR will stop BE from DMAing any data.
3517  */
3518 static void be_shutdown(struct pci_dev *pdev)
3519 {
3520         struct be_adapter *adapter = pci_get_drvdata(pdev);
3521
3522         if (!adapter)
3523                 return;
3524
3525         cancel_delayed_work_sync(&adapter->work);
3526
3527         netif_device_detach(adapter->netdev);
3528
3529         if (adapter->wol)
3530                 be_setup_wol(adapter, true);
3531
3532         be_cmd_reset_function(adapter);
3533
3534         pci_disable_device(pdev);
3535 }
3536
3537 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3538                                 pci_channel_state_t state)
3539 {
3540         struct be_adapter *adapter = pci_get_drvdata(pdev);
3541         struct net_device *netdev = adapter->netdev;
3542
3543         dev_err(&adapter->pdev->dev, "EEH error detected\n");
3544
3545         adapter->eeh_err = true;
3546
3547         netif_device_detach(netdev);
3548
3549         if (netif_running(netdev)) {
3550                 rtnl_lock();
3551                 be_close(netdev);
3552                 rtnl_unlock();
3553         }
3554         be_clear(adapter);
3555
3556         if (state == pci_channel_io_perm_failure)
3557                 return PCI_ERS_RESULT_DISCONNECT;
3558
3559         pci_disable_device(pdev);
3560
3561         return PCI_ERS_RESULT_NEED_RESET;
3562 }
3563
3564 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3565 {
3566         struct be_adapter *adapter = pci_get_drvdata(pdev);
3567         int status;
3568
3569         dev_info(&adapter->pdev->dev, "EEH reset\n");
3570         adapter->eeh_err = false;
3571
3572         status = pci_enable_device(pdev);
3573         if (status)
3574                 return PCI_ERS_RESULT_DISCONNECT;
3575
3576         pci_set_master(pdev);
3577         pci_set_power_state(pdev, PCI_D0);
3578         pci_restore_state(pdev);
3579
3580         /* Check if card is ok and fw is ready */
3581         status = be_cmd_POST(adapter);
3582         if (status)
3583                 return PCI_ERS_RESULT_DISCONNECT;
3584
3585         return PCI_ERS_RESULT_RECOVERED;
3586 }
3587
3588 static void be_eeh_resume(struct pci_dev *pdev)
3589 {
3590         int status = 0;
3591         struct be_adapter *adapter = pci_get_drvdata(pdev);
3592         struct net_device *netdev = adapter->netdev;
3593
3594         dev_info(&adapter->pdev->dev, "EEH resume\n");
3595
3596         pci_save_state(pdev);
3597
3598         /* tell fw we're ready to fire cmds */
3599         status = be_cmd_fw_init(adapter);
3600         if (status)
3601                 goto err;
3602
3603         status = be_setup(adapter);
3604         if (status)
3605                 goto err;
3606
3607         if (netif_running(netdev)) {
3608                 status = be_open(netdev);
3609                 if (status)
3610                         goto err;
3611         }
3612         netif_device_attach(netdev);
3613         return;
3614 err:
3615         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3616 }
3617
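/* EEH recovery sequence: the PCI core calls .error_detected first (the
 * driver quiesces and asks for a reset), then .slot_reset once the slot
 * has been reset (re-enable and re-POST the card), and finally .resume
 * to bring traffic back up.
 */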
3618 static struct pci_error_handlers be_eeh_handlers = {
3619         .error_detected = be_eeh_err_detected,
3620         .slot_reset = be_eeh_reset,
3621         .resume = be_eeh_resume,
3622 };
3623
3624 static struct pci_driver be_driver = {
3625         .name = DRV_NAME,
3626         .id_table = be_dev_ids,
3627         .probe = be_probe,
3628         .remove = be_remove,
3629         .suspend = be_suspend,
3630         .resume = be_resume,
3631         .shutdown = be_shutdown,
3632         .err_handler = &be_eeh_handlers
3633 };
3634
3635 static int __init be_init_module(void)
3636 {
3637         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3638             rx_frag_size != 2048) {
3639                 printk(KERN_WARNING DRV_NAME
3640                         " : Module param rx_frag_size must be 2048/4096/8192."
3641                         " Using 2048\n");
3642                 rx_frag_size = 2048;
3643         }
3644
3645         return pci_register_driver(&be_driver);
3646 }
3647 module_init(be_init_module);
3648
3649 static void __exit be_exit_module(void)
3650 {
3651         pci_unregister_driver(&be_driver);
3652 }
3653 module_exit(be_exit_module);