be2net: fix a race in be_xmit()
drivers/net/benet/be_main.c
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC",
        "RDMA",
        "RXF",
        "RXIPS",
        "RXULP0",
        "RXULP1",
        "RXULP2",
        "TIM",
        "TPOST",
        "TPRE",
        "TXIPS",
        "TXULP0",
        "TXULP1",
        "UC",
        "WDMA",
        "TXULP2",
        "HOST1",
        "P0_OB_LINK",
        "P1_OB_LINK",
        "HOST_GPIO",
        "MBOX",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        memset(mem->va, 0, mem->size);
        return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
        u32 reg, enabled;

        if (adapter->eeh_err)
                return;

        reg = ioread32(addr);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        iowrite32(reg, addr);
}

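/* Queue doorbells: the queue id and the number of entries posted or popped
 * are packed into a single 32-bit write. The wmb() ensures that the queue
 * entries written to memory are visible to the adapter before the doorbell
 * that tells it to fetch them.
 */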
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* MAC addr configuration will be done in hardware for VFs
         * by their corresponding PFs. Just copy to netdev addr here
         */
        if (!be_physfn(adapter))
                goto netdev_addr;

        status = be_cmd_pmac_del(adapter, adapter->if_handle,
                                adapter->pmac_id, 0);
        if (status)
                return status;

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        return status;
}

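/* The stats in the firmware response are laid out differently per ASIC
 * generation: v0 structures for BE2, v1 for BE3, and a pport format for
 * Lancer. The populate_* helpers below copy the generation-specific fields
 * into the common adapter->drv_stats so the rest of the driver stays
 * layout-agnostic.
 */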
static void populate_be2_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
        struct be_port_rxf_stats_v0 *port_stats =
                be_port_rxf_stats_from_cmd(adapter);
        struct be_rxf_stats_v0 *rxf_stats =
                be_rxf_stats_from_cmd(adapter);

        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_match_errors =
                port_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events =
                        rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events =
                        rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr =
                rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags =
                rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
        struct be_rxf_stats_v1 *rxf_stats =
                be_rxf_stats_from_cmd(adapter);
        struct be_port_rxf_stats_v1 *port_stats =
                be_port_rxf_stats_from_cmd(adapter);

        drvs->rx_priority_pause_frames = 0;
        drvs->pmem_fifo_overflow_drop = 0;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_match_errors =
                port_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop =
                port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr =
                rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags =
                rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_cmd_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        drvs->rx_priority_pause_frames = 0;
        drvs->pmem_fifo_overflow_drop = 0;
        drvs->rx_pause_frames =
                make_64bit_val(pport_stats->rx_pause_frames_hi,
                                pport_stats->rx_pause_frames_lo);
        drvs->rx_crc_errors = make_64bit_val(pport_stats->rx_crc_errors_hi,
                                                pport_stats->rx_crc_errors_lo);
        drvs->rx_control_frames =
                        make_64bit_val(pport_stats->rx_control_frames_hi,
                        pport_stats->rx_control_frames_lo);
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long =
                make_64bit_val(pport_stats->rx_frames_too_long_hi,
                                        pport_stats->rx_frames_too_long_lo);
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                make_64bit_val(pport_stats->rx_symbol_errors_hi,
                                pport_stats->rx_symbol_errors_lo);
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = make_64bit_val(pport_stats->tx_pause_frames_hi,
                                        pport_stats->tx_pause_frames_lo);
        drvs->tx_controlframes =
                make_64bit_val(pport_stats->tx_control_frames_hi,
                                pport_stats->tx_control_frames_lo);
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->rx_drops_no_pbuf = 0;
        drvs->rx_drops_no_txpb = 0;
        drvs->rx_drops_no_erx_descr = 0;
        drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
        drvs->forwarded_packets = make_64bit_val(pport_stats->num_forwards_hi,
                                                pport_stats->num_forwards_lo);
        drvs->rx_drops_mtu = make_64bit_val(pport_stats->rx_drops_mtu_hi,
                                                pport_stats->rx_drops_mtu_lo);
        drvs->rx_drops_no_tpre_descr = 0;
        drvs->rx_drops_too_many_frags =
                make_64bit_val(pport_stats->rx_drops_too_many_frags_hi,
                                pport_stats->rx_drops_too_many_frags_lo);
}

void be_parse_stats(struct be_adapter *adapter)
{
        if (adapter->generation == BE_GEN3) {
                if (lancer_chip(adapter))
                        populate_lancer_stats(adapter);
                else
                        populate_be3_stats(adapter);
        } else {
                populate_be2_stats(adapter);
        }
}

void netdev_stats_update(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct net_device_stats *dev_stats = &adapter->netdev->stats;
        struct be_rx_obj *rxo;
        int i;

        memset(dev_stats, 0, sizeof(*dev_stats));
        for_all_rx_queues(adapter, rxo, i) {
                dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
                dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
                dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                /* no space in linux buffers: best possible approximation */
                if (adapter->generation == BE_GEN3) {
                        if (!(lancer_chip(adapter))) {
                                struct be_erx_stats_v1 *erx_stats =
                                        be_erx_stats_from_cmd(adapter);
                                dev_stats->rx_dropped +=
                                erx_stats->rx_drops_no_fragments[rxo->q.id];
                        }
                } else {
                        struct be_erx_stats_v0 *erx_stats =
                                        be_erx_stats_from_cmd(adapter);
                        dev_stats->rx_dropped +=
                                erx_stats->rx_drops_no_fragments[rxo->q.id];
                }
        }

        dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
        dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

        /* bad pkts received */
        dev_stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt +
                drvs->rx_tcp_checksum_errs +
                drvs->rx_ip_checksum_errs +
                drvs->rx_udp_checksum_errs;

        /* detailed rx errors */
        dev_stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        dev_stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
        struct net_device *netdev = adapter->netdev;

        /* If link came up or went down */
        if (adapter->link_up != link_up) {
                adapter->link_speed = -1;
                if (link_up) {
                        netif_carrier_on(netdev);
                        printk(KERN_INFO "%s: Link up\n", netdev->name);
                } else {
                        netif_carrier_off(netdev);
                        printk(KERN_INFO "%s: Link down\n", netdev->name);
                }
                adapter->link_up = link_up;
        }
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_eq_obj *rx_eq = &rxo->rx_eq;
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;
        u32 eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_fps_jiffies)) {
                stats->rx_fps_jiffies = now;
                return;
        }

        /* Update once a second */
        if ((now - stats->rx_fps_jiffies) < HZ)
                return;

        stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
                        ((now - stats->rx_fps_jiffies) / HZ);

        stats->rx_fps_jiffies = now;
        stats->prev_rx_frags = stats->rx_frags;
        eqd = stats->rx_fps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd)
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

        rx_eq->cur_eqd = eqd;
}

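/* Convert a byte count accumulated over 'ticks' jiffies into Mbits/sec.
 * For example, 250,000,000 bytes over 2 * HZ ticks is 125,000,000 bytes/sec,
 * i.e. 1,000,000,000 bits/sec, i.e. 1000 Mbits/sec.
 * Callers must ensure ticks >= HZ, or the first divisor is zero.
 */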
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
        u64 rate = bytes;

        do_div(rate, ticks / HZ);
        rate <<= 3;                     /* bytes/sec -> bits/sec */
        do_div(rate, 1000000ul);        /* bits/sec -> Mbits/sec */

        return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
        struct be_tx_stats *stats = tx_stats(adapter);
        ulong now = jiffies;

        /* Wrapped around? */
        if (time_before(now, stats->be_tx_jiffies)) {
                stats->be_tx_jiffies = now;
                return;
        }

        /* Update tx rate once in two seconds */
        if ((now - stats->be_tx_jiffies) > 2 * HZ) {
                stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
                                                  - stats->be_tx_bytes_prev,
                                                 now - stats->be_tx_jiffies);
                stats->be_tx_jiffies = now;
                stats->be_tx_bytes_prev = stats->be_tx_bytes;
        }
}

static void be_tx_stats_update(struct be_adapter *adapter,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(adapter);
        stats->be_tx_reqs++;
        stats->be_tx_wrbs += wrb_cnt;
        stats->be_tx_bytes += copied;
        stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
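/* A packet needs one WRB for the linear head (if any), one per page frag,
 * and one for the header WRB. On BE2/BE3 the total per packet must be even,
 * so a zero-length dummy WRB is appended when the count is odd; Lancer has
 * no such restriction.
 */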
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u8 vlan_prio = 0;
        u16 vlan_tag = 0;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
                if (lancer_chip(adapter) && adapter->sli_family ==
                                                        LANCER_A0_SLI_FAMILY) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        if (is_tcp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                tcpcs, hdr, 1);
                        else if (is_udp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                udpcs, hdr, 1);
                }
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = vlan_tx_tag_get(skb);
                vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                /* If vlan priority provided by OS is NOT in available bmap */
                if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                        vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                        adapter->recommended_prio;
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

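/* Map the skb's linear head and page frags for DMA and fill one WRB per
 * fragment, reserving the first ring entry for the header WRB. On a DMA
 * mapping error the head is rewound so the WRBs filled so far can be walked
 * and their buffers unmapped; returning 0 tells the caller to restore the
 * ring head and drop the skb.
 */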
static int make_tx_wrbs(struct be_adapter *adapter,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = dma_map_page(dev, frag->page, frag->page_offset,
                                       frag->size, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, frag->size);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += frag->size;
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

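/* The skb must not be touched after the TX doorbell is rung: once the
 * doorbell is visible to the adapter, a completion can fire and free the
 * skb on another CPU. Hence gso_segs is sampled and the queue is stopped
 * (if needed) before be_txq_notify() is called.
 */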
static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *tx_obj = &adapter->tx_obj;
        struct be_queue_info *txq = &tx_obj->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(tx_obj->sent_skb_list[start]);
                tx_obj->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_queue(netdev);
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(adapter, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
        u32 if_handle;

        if (vf) {
                if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
                vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
                status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
        }

        if (adapter->vlans_added <= adapter->max_vlans) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_N_VID; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        NULL, 0, 1, 1);
        }

        return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added++;
        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added--;
        vlan_group_set_device(adapter->vlan_grp, vid, NULL);

        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_promiscuous_config(adapter, true);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_promiscuous_config(adapter, false);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
                                &adapter->mc_cmd_mem);
                goto done;
        }

        be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
                &adapter->mc_cmd_mem);
done:
        return;
}

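/* SR-IOV ndo hooks: these run in the PF on behalf of the VF identified by
 * its index; firmware commands are issued with domain 'vf + 1', as domain 0
 * refers to the PF itself.
 */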
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
                return -EINVAL;

        if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                status = be_cmd_pmac_del(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
                                        adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        status = be_cmd_pmac_add(adapter, mac,
                                adapter->vf_cfg[vf].vf_if_handle,
                                &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
                                mac, vf);
        else
                memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (vf >= num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
        vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (vlan > 4095))
                return -EINVAL;

        if (vlan) {
                adapter->vf_cfg[vf].vf_vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
                adapter->vf_cfg[vf].vf_vlan_tag = 0;
                adapter->vlans_added--;
        }

        status = be_vid_config(adapter, true, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (rate < 0))
                return -EINVAL;

        if (rate > 10000)
                rate = 10000;

        adapter->vf_cfg[vf].vf_tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update the rate once in two seconds */
        if ((now - stats->rx_jiffies) < 2 * HZ)
                return;

        stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
                                now - stats->rx_jiffies);
        stats->rx_jiffies = now;
        stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = &rxo->stats;

        stats->rx_compl++;
        stats->rx_frags += rxcp->num_rcvd;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rxcp_err++;
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

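/* A big page is carved into rx_frag_size pieces that are posted as separate
 * receive frags; the page is DMA-unmapped only when its last user
 * (last_page_user) is consumed, and rxq->used is decremented for every frag
 * taken off the ring.
 */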
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
                        struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min(BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_shinfo(skb)->frags[0].page = page_info->page;
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
                skb->data_len = curr_frag_len - hdr_len;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_shinfo(skb)->frags[j].size += curr_frag_len;
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        struct be_rx_compl_info *rxcp)
{
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                if (net_ratelimit())
                        dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, netdev);
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (unlikely(rxcp->vlanf)) {
                if (!adapter->vlan_grp || adapter->vlans_added == 0) {
                        kfree_skb(skb);
                        return;
                }
                vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
                                        rxcp->vlan_tag);
        } else {
                netif_receive_skb(skb);
        }
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct be_eq_obj *eq_obj = &rxo->rx_eq;
        u16 remaining, curr_frag_len;
        u16 i, j;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        remaining = rxcp->pkt_size;
        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                } else {
                        put_page(page_info->page);
                }
                skb_shinfo(skb)->frags[j].size += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = rxcp->pkt_size;
        skb->data_len = rxcp->pkt_size;
        skb->truesize += rxcp->pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (likely(!rxcp->vlanf))
                napi_gro_frags(&eq_obj->napi);
        else
                vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp,
                                rxcp->vlan_tag);
}

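/* Parse a v1 (BE3 native) RX completion into the HW-independent
 * be_rx_compl_info. The v0 variant below does the same for the legacy
 * completion format; be_rx_compl_get() picks one based on be3_native.
 */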
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
                                               compl);
        }
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
                                               compl);
        }
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
        struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
        struct be_rx_compl_info *rxcp = &rxo->rxcp;
        struct be_adapter *adapter = rxo->adapter;

        /* For checking the valid bit it is Ok to use either definition as the
         * valid bit is at the same position in both v0 and v1 Rx compl */
        if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(compl, sizeof(*compl));

        if (adapter->be3_native)
                be_parse_rx_compl_v1(adapter, compl, rxcp);
        else
                be_parse_rx_compl_v0(adapter, compl, rxcp);

        if (rxcp->vlanf) {
                /* vlanf could be wrongly set in some cards.
                 * ignore if vtm is not set */
                if ((adapter->function_mode & 0x400) && !rxcp->vtm)
                        rxcp->vlanf = 0;

                if (!lancer_chip(adapter))
                        rxcp->vlan_tag = swab16(rxcp->vlan_tag);

                if (((adapter->pvid & VLAN_VID_MASK) ==
                     (rxcp->vlan_tag & VLAN_VID_MASK)) &&
                    !adapter->vlan_tag[rxcp->vlan_tag])
                        rxcp->vlanf = 0;
        }

        /* As the compl has been parsed, reset it; we won't touch it again */
        compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

        queue_tail_inc(&rxo->cq);
        return rxcp;
}

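/* Order > 0 allocations are made compound (__GFP_COMP) so that refcounting
 * via get_page()/put_page() on the posted rx frags operates on the
 * allocation as a whole.
 */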
static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
        u32 order = get_order(size);

        if (order > 0)
                gfp |= __GFP_COMP;
        return alloc_pages(gfp, order);
}

1384 /*
1385  * Allocate a page, split it to fragments of size rx_frag_size and post as
1386  * receive buffers to BE
1387  */
1388 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1389 {
1390         struct be_adapter *adapter = rxo->adapter;
1391         struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1392         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1393         struct be_queue_info *rxq = &rxo->q;
1394         struct page *pagep = NULL;
1395         struct be_eth_rx_d *rxd;
1396         u64 page_dmaaddr = 0, frag_dmaaddr;
1397         u32 posted, page_offset = 0;
1398
1399         page_info = &rxo->page_info_tbl[rxq->head];
1400         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1401                 if (!pagep) {
1402                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1403                         if (unlikely(!pagep)) {
1404                                 rxo->stats.rx_post_fail++;
1405                                 break;
1406                         }
1407                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1408                                                     0, adapter->big_page_size,
1409                                                     DMA_FROM_DEVICE);
1410                         page_info->page_offset = 0;
1411                 } else {
1412                         get_page(pagep);
1413                         page_info->page_offset = page_offset + rx_frag_size;
1414                 }
1415                 page_offset = page_info->page_offset;
1416                 page_info->page = pagep;
1417                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1418                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1419
1420                 rxd = queue_head_node(rxq);
1421                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1422                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1423
1424                 /* Any space left in the current big page for another frag? */
1425                 if ((page_offset + rx_frag_size + rx_frag_size) >
1426                                         adapter->big_page_size) {
1427                         pagep = NULL;
1428                         page_info->last_page_user = true;
1429                 }
1430
1431                 prev_page_info = page_info;
1432                 queue_head_inc(rxq);
1433                 page_info = &page_info_tbl[rxq->head];
1434         }
1435         if (pagep)
1436                 prev_page_info->last_page_user = true;
1437
1438         if (posted) {
1439                 atomic_add(posted, &rxq->used);
1440                 be_rxq_notify(adapter, rxq->id, posted);
1441         } else if (atomic_read(&rxq->used) == 0) {
1442                 /* Let be_worker replenish when memory is available */
1443                 rxo->rx_post_starved = true;
1444         }
1445 }
1446
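/* Consume the next TX completion from the TX CQ, or return NULL if none is
 * pending. The entry is converted from LE to CPU byte order and its valid
 * bit is cleared before the CQ tail is advanced.
 */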
1447 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1448 {
1449         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1450
1451         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1452                 return NULL;
1453
1454         rmb();
1455         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1456
1457         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1458
1459         queue_tail_inc(tx_cq);
1460         return txcp;
1461 }
1462
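/* Unmap all wrbs used by the skb that ends at last_index in the TX queue
 * and free the skb. Returns the number of wrbs consumed, including the
 * header wrb.
 */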
1463 static u16 be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
1464 {
1465         struct be_queue_info *txq = &adapter->tx_obj.q;
1466         struct be_eth_wrb *wrb;
1467         struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1468         struct sk_buff *sent_skb;
1469         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1470         bool unmap_skb_hdr = true;
1471
1472         sent_skb = sent_skbs[txq->tail];
1473         BUG_ON(!sent_skb);
1474         sent_skbs[txq->tail] = NULL;
1475
1476         /* skip header wrb */
1477         queue_tail_inc(txq);
1478
1479         do {
1480                 cur_index = txq->tail;
1481                 wrb = queue_tail_node(txq);
1482                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1483                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1484                 unmap_skb_hdr = false;
1485
1486                 num_wrbs++;
1487                 queue_tail_inc(txq);
1488         } while (cur_index != last_index);
1489
1490         kfree_skb(sent_skb);
1491         return num_wrbs;
1492 }
1493
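/* Return the next posted entry in the event queue, or NULL if none is
 * pending. Advances the EQ tail; the caller must clear eqe->evt once the
 * event has been handled.
 */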
1494 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1495 {
1496         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1497
1498         if (!eqe->evt)
1499                 return NULL;
1500
1501         rmb();
1502         eqe->evt = le32_to_cpu(eqe->evt);
1503         queue_tail_inc(&eq_obj->q);
1504         return eqe;
1505 }
1506
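/* Drain all pending entries from the event queue, re-arm it with the number
 * of events consumed (a zero count still re-arms, covering spurious
 * interrupts), and schedule NAPI if any event was seen.
 */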
1507 static int event_handle(struct be_adapter *adapter,
1508                         struct be_eq_obj *eq_obj)
1509 {
1510         struct be_eq_entry *eqe;
1511         u16 num = 0;
1512
1513         while ((eqe = event_get(eq_obj)) != NULL) {
1514                 eqe->evt = 0;
1515                 num++;
1516         }
1517
1518         /* Deal with any spurious interrupts that come
1519          * without events
1520          */
1521         be_eq_notify(adapter, eq_obj->q.id, true, true, num);
1522         if (num)
1523                 napi_schedule(&eq_obj->napi);
1524
1525         return num;
1526 }
1527
1528 /* Just read and notify events without processing them.
1529  * Used at the time of destroying event queues */
1530 static void be_eq_clean(struct be_adapter *adapter,
1531                         struct be_eq_obj *eq_obj)
1532 {
1533         struct be_eq_entry *eqe;
1534         u16 num = 0;
1535
1536         while ((eqe = event_get(eq_obj)) != NULL) {
1537                 eqe->evt = 0;
1538                 num++;
1539         }
1540
1541         if (num)
1542                 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1543 }
1544
1545 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1546 {
1547         struct be_rx_page_info *page_info;
1548         struct be_queue_info *rxq = &rxo->q;
1549         struct be_queue_info *rx_cq = &rxo->cq;
1550         struct be_rx_compl_info *rxcp;
1551         u16 tail;
1552
1553         /* First cleanup pending rx completions */
1554         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1555                 be_rx_compl_discard(adapter, rxo, rxcp);
1556                 be_cq_notify(adapter, rx_cq->id, false, 1);
1557         }
1558
1559         /* Then free posted rx buffers that were not used */
1560         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1561         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1562                 page_info = get_rx_page_info(adapter, rxo, tail);
1563                 put_page(page_info->page);
1564                 memset(page_info, 0, sizeof(*page_info));
1565         }
1566         BUG_ON(atomic_read(&rxq->used));
1567 }
1568
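/* Drain the TX CQ at teardown: poll for up to 200ms for outstanding tx
 * completions, then forcibly reclaim any posted skbs whose completions
 * never arrived so that the TX queue ends up empty.
 */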
1569 static void be_tx_compl_clean(struct be_adapter *adapter)
1570 {
1571         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1572         struct be_queue_info *txq = &adapter->tx_obj.q;
1573         struct be_eth_tx_compl *txcp;
1574         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1575         struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1576         struct sk_buff *sent_skb;
1577         bool dummy_wrb;
1578
1579         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1580         do {
1581                 while ((txcp = be_tx_compl_get(tx_cq))) {
1582                         end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1583                                         wrb_index, txcp);
1584                         num_wrbs += be_tx_compl_process(adapter, end_idx);
1585                         cmpl++;
1586                 }
1587                 if (cmpl) {
1588                         be_cq_notify(adapter, tx_cq->id, false, cmpl);
1589                         atomic_sub(num_wrbs, &txq->used);
1590                         cmpl = 0;
1591                         num_wrbs = 0;
1592                 }
1593
1594                 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1595                         break;
1596
1597                 mdelay(1);
1598         } while (true);
1599
1600         if (atomic_read(&txq->used))
1601                 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1602                         atomic_read(&txq->used));
1603
1604         /* Free posted tx skbs for which compls will never arrive */
1605         while (atomic_read(&txq->used)) {
1606                 sent_skb = sent_skbs[txq->tail];
1607                 end_idx = txq->tail;
1608                 index_adv(&end_idx,
1609                         wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1610                         txq->len);
1611                 num_wrbs = be_tx_compl_process(adapter, end_idx);
1612                 atomic_sub(num_wrbs, &txq->used);
1613         }
1614 }
1615
1616 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1617 {
1618         struct be_queue_info *q;
1619
1620         q = &adapter->mcc_obj.q;
1621         if (q->created)
1622                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1623         be_queue_free(adapter, q);
1624
1625         q = &adapter->mcc_obj.cq;
1626         if (q->created)
1627                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1628         be_queue_free(adapter, q);
1629 }
1630
1631 /* Must be called only after TX qs are created, as MCC shares the TX EQ */
1632 static int be_mcc_queues_create(struct be_adapter *adapter)
1633 {
1634         struct be_queue_info *q, *cq;
1635
1636         /* Alloc MCC compl queue */
1637         cq = &adapter->mcc_obj.cq;
1638         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1639                         sizeof(struct be_mcc_compl)))
1640                 goto err;
1641
1642         /* Ask BE to create MCC compl queue; share TX's eq */
1643         if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1644                 goto mcc_cq_free;
1645
1646         /* Alloc MCC queue */
1647         q = &adapter->mcc_obj.q;
1648         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1649                 goto mcc_cq_destroy;
1650
1651         /* Ask BE to create MCC queue */
1652         if (be_cmd_mccq_create(adapter, q, cq))
1653                 goto mcc_q_free;
1654
1655         return 0;
1656
1657 mcc_q_free:
1658         be_queue_free(adapter, q);
1659 mcc_cq_destroy:
1660         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1661 mcc_cq_free:
1662         be_queue_free(adapter, cq);
1663 err:
1664         return -1;
1665 }
1666
1667 static void be_tx_queues_destroy(struct be_adapter *adapter)
1668 {
1669         struct be_queue_info *q;
1670
1671         q = &adapter->tx_obj.q;
1672         if (q->created)
1673                 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1674         be_queue_free(adapter, q);
1675
1676         q = &adapter->tx_obj.cq;
1677         if (q->created)
1678                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1679         be_queue_free(adapter, q);
1680
1681         /* Clear any residual events */
1682         be_eq_clean(adapter, &adapter->tx_eq);
1683
1684         q = &adapter->tx_eq.q;
1685         if (q->created)
1686                 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1687         be_queue_free(adapter, q);
1688 }
1689
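/* Create the TX event queue (fixed EQD, no adaptive coalescing), the TX
 * completion queue and the TX queue, unwinding everything already created
 * if any step fails.
 */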
1690 static int be_tx_queues_create(struct be_adapter *adapter)
1691 {
1692         struct be_queue_info *eq, *q, *cq;
1693
1694         adapter->tx_eq.max_eqd = 0;
1695         adapter->tx_eq.min_eqd = 0;
1696         adapter->tx_eq.cur_eqd = 96;
1697         adapter->tx_eq.enable_aic = false;
1698         /* Alloc Tx Event queue */
1699         eq = &adapter->tx_eq.q;
1700         if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
1701                 return -1;
1702
1703         /* Ask BE to create Tx Event queue */
1704         if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1705                 goto tx_eq_free;
1706
1707         adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1708
1710         /* Alloc TX eth compl queue */
1711         cq = &adapter->tx_obj.cq;
1712         if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1713                         sizeof(struct be_eth_tx_compl)))
1714                 goto tx_eq_destroy;
1715
1716         /* Ask BE to create Tx eth compl queue */
1717         if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1718                 goto tx_cq_free;
1719
1720         /* Alloc TX eth queue */
1721         q = &adapter->tx_obj.q;
1722         if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
1723                 goto tx_cq_destroy;
1724
1725         /* Ask BE to create Tx eth queue */
1726         if (be_cmd_txq_create(adapter, q, cq))
1727                 goto tx_q_free;
1728         return 0;
1729
1730 tx_q_free:
1731         be_queue_free(adapter, q);
1732 tx_cq_destroy:
1733         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1734 tx_cq_free:
1735         be_queue_free(adapter, cq);
1736 tx_eq_destroy:
1737         be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
1738 tx_eq_free:
1739         be_queue_free(adapter, eq);
1740         return -1;
1741 }
1742
1743 static void be_rx_queues_destroy(struct be_adapter *adapter)
1744 {
1745         struct be_queue_info *q;
1746         struct be_rx_obj *rxo;
1747         int i;
1748
1749         for_all_rx_queues(adapter, rxo, i) {
1750                 q = &rxo->q;
1751                 if (q->created) {
1752                         be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1753                         /* After the rxq is invalidated, wait for a grace time
1754                          * of 1ms for all dma to end and the flush compl to
1755                          * arrive
1756                          */
1757                         mdelay(1);
1758                         be_rx_q_clean(adapter, rxo);
1759                 }
1760                 be_queue_free(adapter, q);
1761
1762                 q = &rxo->cq;
1763                 if (q->created)
1764                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1765                 be_queue_free(adapter, q);
1766
1767                 /* Clear any residual events */
1768                 q = &rxo->rx_eq.q;
1769                 if (q->created) {
1770                         be_eq_clean(adapter, &rxo->rx_eq);
1771                         be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1772                 }
1773                 be_queue_free(adapter, q);
1774         }
1775 }
1776
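/* Multiple RX queues (1 default + MAX_RSS_QS RSS queues) are used only when
 * enabled via the module param, the function has the RSS capability, SR-IOV
 * is off and function-mode bit 0x400 is clear (0x400 appears to be the
 * multi-channel/FLEX10 mode bit); otherwise fall back to a single RX queue.
 */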
1777 static u32 be_num_rxqs_want(struct be_adapter *adapter)
1778 {
1779         if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1780                 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1781                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1782         } else {
1783                 dev_warn(&adapter->pdev->dev,
1784                         "No support for multiple RX queues\n");
1785                 return 1;
1786         }
1787 }
1788
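/* Create the RX rings: the queue count is capped by the available MSI-X
 * vectors (one vector is reserved for TX/MCC). For each ring an EQ (with
 * adaptive coalescing), a CQ and an RX queue are created; RSS is enabled on
 * all but queue 0, and the RSS indirection table is programmed when
 * multiple rings exist.
 */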
1789 static int be_rx_queues_create(struct be_adapter *adapter)
1790 {
1791         struct be_queue_info *eq, *q, *cq;
1792         struct be_rx_obj *rxo;
1793         int rc, i;
1794
1795         adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1796                                 msix_enabled(adapter) ?
1797                                         adapter->num_msix_vec - 1 : 1);
1798         if (adapter->num_rx_qs != MAX_RX_QS)
1799                 dev_warn(&adapter->pdev->dev,
1800                         "Can create only %d RX queues\n", adapter->num_rx_qs);
1801
1802         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1803         for_all_rx_queues(adapter, rxo, i) {
1804                 rxo->adapter = adapter;
1805                 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1806                 rxo->rx_eq.enable_aic = true;
1807
1808                 /* EQ */
1809                 eq = &rxo->rx_eq.q;
1810                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1811                                         sizeof(struct be_eq_entry));
1812                 if (rc)
1813                         goto err;
1814
1815                 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1816                 if (rc)
1817                         goto err;
1818
1819                 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1820
1821                 /* CQ */
1822                 cq = &rxo->cq;
1823                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1824                                 sizeof(struct be_eth_rx_compl));
1825                 if (rc)
1826                         goto err;
1827
1828                 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1829                 if (rc)
1830                         goto err;
1831                 /* Rx Q */
1832                 q = &rxo->q;
1833                 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1834                                 sizeof(struct be_eth_rx_d));
1835                 if (rc)
1836                         goto err;
1837
1838                 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1839                         BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1840                         (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
1841                 if (rc)
1842                         goto err;
1843         }
1844
1845         if (be_multi_rxq(adapter)) {
1846                 u8 rsstable[MAX_RSS_QS];
1847
1848                 for_all_rss_queues(adapter, rxo, i)
1849                         rsstable[i] = rxo->rss_id;
1850
1851                 rc = be_cmd_rss_config(adapter, rsstable,
1852                         adapter->num_rx_qs - 1);
1853                 if (rc)
1854                         goto err;
1855         }
1856
1857         return 0;
1858 err:
1859         be_rx_queues_destroy(adapter);
1860         return -1;
1861 }
1862
1863 static bool event_peek(struct be_eq_obj *eq_obj)
1864 {
1865         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1866         if (!eqe->evt)
1867                 return false;
1868         else
1869                 return true;
1870 }
1871
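/* INTx (legacy interrupt) handler. On Lancer there is no CEV_ISR register,
 * so the TX and RX EQs are peeked directly; on BEx the ISR CSR identifies
 * which EQs fired. Returns IRQ_NONE when the (possibly shared) interrupt
 * was not raised by this device.
 */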
1872 static irqreturn_t be_intx(int irq, void *dev)
1873 {
1874         struct be_adapter *adapter = dev;
1875         struct be_rx_obj *rxo;
1876         int isr, i, tx = 0, rx = 0;
1877
1878         if (lancer_chip(adapter)) {
1879                 if (event_peek(&adapter->tx_eq))
1880                         tx = event_handle(adapter, &adapter->tx_eq);
1881                 for_all_rx_queues(adapter, rxo, i) {
1882                         if (event_peek(&rxo->rx_eq))
1883                                 rx |= event_handle(adapter, &rxo->rx_eq);
1884                 }
1885
1886                 if (!(tx || rx))
1887                         return IRQ_NONE;
1888
1889         } else {
1890                 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1891                         (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1892                 if (!isr)
1893                         return IRQ_NONE;
1894
1895                 if ((1 << adapter->tx_eq.eq_idx & isr))
1896                         event_handle(adapter, &adapter->tx_eq);
1897
1898                 for_all_rx_queues(adapter, rxo, i) {
1899                         if ((1 << rxo->rx_eq.eq_idx & isr))
1900                                 event_handle(adapter, &rxo->rx_eq);
1901                 }
1902         }
1903
1904         return IRQ_HANDLED;
1905 }
1906
1907 static irqreturn_t be_msix_rx(int irq, void *dev)
1908 {
1909         struct be_rx_obj *rxo = dev;
1910         struct be_adapter *adapter = rxo->adapter;
1911
1912         event_handle(adapter, &rxo->rx_eq);
1913
1914         return IRQ_HANDLED;
1915 }
1916
1917 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1918 {
1919         struct be_adapter *adapter = dev;
1920
1921         event_handle(adapter, &adapter->tx_eq);
1922
1923         return IRQ_HANDLED;
1924 }
1925
1926 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1927 {
1928         return rxcp->tcpf && !rxcp->err;
1929 }
1930
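/* NAPI poll handler for an RX ring: process up to 'budget' completions
 * (using GRO for clean TCP frames and discarding 0-byte flush completions),
 * replenish the RX queue when it runs low, and re-arm the CQ only once all
 * pending work has been consumed.
 */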
1931 static int be_poll_rx(struct napi_struct *napi, int budget)
1932 {
1933         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1934         struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1935         struct be_adapter *adapter = rxo->adapter;
1936         struct be_queue_info *rx_cq = &rxo->cq;
1937         struct be_rx_compl_info *rxcp;
1938         u32 work_done;
1939
1940         rxo->stats.rx_polls++;
1941         for (work_done = 0; work_done < budget; work_done++) {
1942                 rxcp = be_rx_compl_get(rxo);
1943                 if (!rxcp)
1944                         break;
1945
1946                 /* Ignore flush completions */
1947                 if (rxcp->num_rcvd && rxcp->pkt_size) {
1948                         if (do_gro(rxcp))
1949                                 be_rx_compl_process_gro(adapter, rxo, rxcp);
1950                         else
1951                                 be_rx_compl_process(adapter, rxo, rxcp);
1952                 } else if (rxcp->pkt_size == 0) {
1953                         be_rx_compl_discard(adapter, rxo, rxcp);
1954                 }
1955
1956                 be_rx_stats_update(rxo, rxcp);
1957         }
1958
1959         /* Refill the queue */
1960         if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1961                 be_post_rx_frags(rxo, GFP_ATOMIC);
1962
1963         /* All consumed */
1964         if (work_done < budget) {
1965                 napi_complete(napi);
1966                 be_cq_notify(adapter, rx_cq->id, true, work_done);
1967         } else {
1968                 /* More to be consumed; continue with interrupts disabled */
1969                 be_cq_notify(adapter, rx_cq->id, false, work_done);
1970         }
1971         return work_done;
1972 }
1973
1974 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1975  * For TX/MCC we don't honour budget; consume everything
1976  */
1977 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1978 {
1979         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1980         struct be_adapter *adapter =
1981                 container_of(tx_eq, struct be_adapter, tx_eq);
1982         struct be_queue_info *txq = &adapter->tx_obj.q;
1983         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1984         struct be_eth_tx_compl *txcp;
1985         int tx_compl = 0, mcc_compl, status = 0;
1986         u16 end_idx, num_wrbs = 0;
1987
1988         while ((txcp = be_tx_compl_get(tx_cq))) {
1989                 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1990                                 wrb_index, txcp);
1991                 num_wrbs += be_tx_compl_process(adapter, end_idx);
1992                 tx_compl++;
1993         }
1994
1995         mcc_compl = be_process_mcc(adapter, &status);
1996
1997         napi_complete(napi);
1998
1999         if (mcc_compl) {
2000                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2001                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
2002         }
2003
2004         if (tx_compl) {
2005                 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
2006
2007                 atomic_sub(num_wrbs, &txq->used);
2008
2009                 /* As Tx wrbs have been freed up, wake up netdev queue if
2010                  * it was stopped due to lack of tx wrbs.
2011                  */
2012                 if (netif_queue_stopped(adapter->netdev) &&
2013                         atomic_read(&txq->used) < txq->len / 2) {
2014                         netif_wake_queue(adapter->netdev);
2015                 }
2016
2017                 tx_stats(adapter)->be_tx_events++;
2018                 tx_stats(adapter)->be_tx_compl += tx_compl;
2019         }
2020
2021         return 1;
2022 }
2023
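/* Check the UE (unrecoverable error) status registers in PCI config space.
 * If any unmasked error bit is set, flag the adapter as errored and log the
 * name of every block that reported an error.
 */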
2024 void be_detect_dump_ue(struct be_adapter *adapter)
2025 {
2026         u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
2027         u32 i;
2028
2029         pci_read_config_dword(adapter->pdev,
2030                                 PCICFG_UE_STATUS_LOW, &ue_status_lo);
2031         pci_read_config_dword(adapter->pdev,
2032                                 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
2033         pci_read_config_dword(adapter->pdev,
2034                                 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
2035         pci_read_config_dword(adapter->pdev,
2036                                 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
2037
2038         ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
2039         ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
2040
2041         if (ue_status_lo || ue_status_hi) {
2042                 adapter->ue_detected = true;
2043                 adapter->eeh_err = true;
2044                 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
2045         }
2046
2047         if (ue_status_lo) {
2048                 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
2049                         if (ue_status_lo & 1)
2050                                 dev_err(&adapter->pdev->dev,
2051                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2052                 }
2053         }
2054         if (ue_status_hi) {
2055                 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
2056                         if (ue_status_hi & 1)
2057                                 dev_err(&adapter->pdev->dev,
2058                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2059                 }
2060         }
2061
2062 }
2063
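/* Periodic (1 second) housekeeping: check for unrecoverable errors, reap
 * MCC completions while the interface is down, issue the stats command,
 * update tx/rx rate estimates and adaptive EQD, and replenish any RX queue
 * that previously failed to allocate buffers.
 */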
2064 static void be_worker(struct work_struct *work)
2065 {
2066         struct be_adapter *adapter =
2067                 container_of(work, struct be_adapter, work.work);
2068         struct be_rx_obj *rxo;
2069         int i;
2070
2071         if (!adapter->ue_detected && !lancer_chip(adapter))
2072                 be_detect_dump_ue(adapter);
2073
2074         /* When interrupts are not yet enabled, just reap any pending
2075          * mcc completions */
2076         if (!netif_running(adapter->netdev)) {
2077                 int mcc_compl, status = 0;
2078
2079                 mcc_compl = be_process_mcc(adapter, &status);
2080
2081                 if (mcc_compl) {
2082                         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2083                         be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
2084                 }
2085
2086                 goto reschedule;
2087         }
2088
2089         if (!adapter->stats_cmd_sent) {
2090                 if (lancer_chip(adapter))
2091                         lancer_cmd_get_pport_stats(adapter,
2092                                                 &adapter->stats_cmd);
2093                 else
2094                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
2095         }
2096         be_tx_rate_update(adapter);
2097
2098         for_all_rx_queues(adapter, rxo, i) {
2099                 be_rx_rate_update(rxo);
2100                 be_rx_eqd_update(adapter, rxo);
2101
2102                 if (rxo->rx_post_starved) {
2103                         rxo->rx_post_starved = false;
2104                         be_post_rx_frags(rxo, GFP_KERNEL);
2105                 }
2106         }
2107
2108 reschedule:
2109         adapter->work_counter++;
2110         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2111 }
2112
2113 static void be_msix_disable(struct be_adapter *adapter)
2114 {
2115         if (msix_enabled(adapter)) {
2116                 pci_disable_msix(adapter->pdev);
2117                 adapter->num_msix_vec = 0;
2118         }
2119 }
2120
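/* Request one MSI-X vector per desired RX queue plus one shared by TX and
 * MCC. pci_enable_msix() returns a positive count when fewer vectors are
 * available; e.g. asking for 5 when only 3 exist retries with 3, which
 * still satisfies the BE_MIN_MSIX_VECTORS (Rx + Tx) minimum.
 */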
2121 static void be_msix_enable(struct be_adapter *adapter)
2122 {
2123 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
2124         int i, status, num_vec;
2125
2126         num_vec = be_num_rxqs_want(adapter) + 1;
2127
2128         for (i = 0; i < num_vec; i++)
2129                 adapter->msix_entries[i].entry = i;
2130
2131         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2132         if (status == 0) {
2133                 goto done;
2134         } else if (status >= BE_MIN_MSIX_VECTORS) {
2135                 num_vec = status;
2136                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2137                                 num_vec) == 0)
2138                         goto done;
2139         }
2140         return;
2141 done:
2142         adapter->num_msix_vec = num_vec;
2143         return;
2144 }
2145
2146 static void be_sriov_enable(struct be_adapter *adapter)
2147 {
2148         be_check_sriov_fn_type(adapter);
2149 #ifdef CONFIG_PCI_IOV
2150         if (be_physfn(adapter) && num_vfs) {
2151                 int status, pos;
2152                 u16 nvfs;
2153
2154                 pos = pci_find_ext_capability(adapter->pdev,
2155                                                 PCI_EXT_CAP_ID_SRIOV);
2156                 pci_read_config_word(adapter->pdev,
2157                                         pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2158
2159                 if (num_vfs > nvfs) {
2160                         dev_info(&adapter->pdev->dev,
2161                                         "Device supports %d VFs and not %d\n",
2162                                         nvfs, num_vfs);
2163                         num_vfs = nvfs;
2164                 }
2165
2166                 status = pci_enable_sriov(adapter->pdev, num_vfs);
2167                 adapter->sriov_enabled = status ? false : true;
2168         }
2169 #endif
2170 }
2171
2172 static void be_sriov_disable(struct be_adapter *adapter)
2173 {
2174 #ifdef CONFIG_PCI_IOV
2175         if (adapter->sriov_enabled) {
2176                 pci_disable_sriov(adapter->pdev);
2177                 adapter->sriov_enabled = false;
2178         }
2179 #endif
2180 }
2181
2182 static inline int be_msix_vec_get(struct be_adapter *adapter,
2183                                         struct be_eq_obj *eq_obj)
2184 {
2185         return adapter->msix_entries[eq_obj->eq_idx].vector;
2186 }
2187
2188 static int be_request_irq(struct be_adapter *adapter,
2189                 struct be_eq_obj *eq_obj,
2190                 void *handler, char *desc, void *context)
2191 {
2192         struct net_device *netdev = adapter->netdev;
2193         int vec;
2194
2195         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
2196         vec = be_msix_vec_get(adapter, eq_obj);
2197         return request_irq(vec, handler, 0, eq_obj->desc, context);
2198 }
2199
2200 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2201                         void *context)
2202 {
2203         int vec = be_msix_vec_get(adapter, eq_obj);
2204         free_irq(vec, context);
2205 }
2206
2207 static int be_msix_register(struct be_adapter *adapter)
2208 {
2209         struct be_rx_obj *rxo;
2210         int status, i;
2211         char qname[10];
2212
2213         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2214                                 adapter);
2215         if (status)
2216                 goto err;
2217
2218         for_all_rx_queues(adapter, rxo, i) {
2219                 sprintf(qname, "rxq%d", i);
2220                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2221                                 qname, rxo);
2222                 if (status)
2223                         goto err_msix;
2224         }
2225
2226         return 0;
2227
2228 err_msix:
2229         be_free_irq(adapter, &adapter->tx_eq, adapter);
2230
2231         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2232                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2233
2234 err:
2235         dev_warn(&adapter->pdev->dev,
2236                 "MSIX Request IRQ failed - err %d\n", status);
2237         be_msix_disable(adapter);
2238         return status;
2239 }
2240
2241 static int be_irq_register(struct be_adapter *adapter)
2242 {
2243         struct net_device *netdev = adapter->netdev;
2244         int status;
2245
2246         if (msix_enabled(adapter)) {
2247                 status = be_msix_register(adapter);
2248                 if (status == 0)
2249                         goto done;
2250                 /* INTx is not supported for VF */
2251                 if (!be_physfn(adapter))
2252                         return status;
2253         }
2254
2255         /* INTx */
2256         netdev->irq = adapter->pdev->irq;
2257         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2258                         adapter);
2259         if (status) {
2260                 dev_err(&adapter->pdev->dev,
2261                         "INTx request IRQ failed - err %d\n", status);
2262                 return status;
2263         }
2264 done:
2265         adapter->isr_registered = true;
2266         return 0;
2267 }
2268
2269 static void be_irq_unregister(struct be_adapter *adapter)
2270 {
2271         struct net_device *netdev = adapter->netdev;
2272         struct be_rx_obj *rxo;
2273         int i;
2274
2275         if (!adapter->isr_registered)
2276                 return;
2277
2278         /* INTx */
2279         if (!msix_enabled(adapter)) {
2280                 free_irq(netdev->irq, adapter);
2281                 goto done;
2282         }
2283
2284         /* MSIx */
2285         be_free_irq(adapter, &adapter->tx_eq, adapter);
2286
2287         for_all_rx_queues(adapter, rxo, i)
2288                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2289
2290 done:
2291         adapter->isr_registered = false;
2292 }
2293
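/* ndo_stop: quiesce the interface. Async MCC processing is stopped,
 * interrupts are masked, NAPI is disabled, IRQs are synchronized and freed,
 * and pending TX completions are drained so that all tx skbs are freed.
 */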
2294 static int be_close(struct net_device *netdev)
2295 {
2296         struct be_adapter *adapter = netdev_priv(netdev);
2297         struct be_rx_obj *rxo;
2298         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2299         int vec, i;
2300
2301         be_async_mcc_disable(adapter);
2302
2303         netif_carrier_off(netdev);
2304         adapter->link_up = false;
2305
2306         if (!lancer_chip(adapter))
2307                 be_intr_set(adapter, false);
2308
2309         for_all_rx_queues(adapter, rxo, i)
2310                 napi_disable(&rxo->rx_eq.napi);
2311
2312         napi_disable(&tx_eq->napi);
2313
2314         if (lancer_chip(adapter)) {
2315                 be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
2316                 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2317                 for_all_rx_queues(adapter, rxo, i)
2318                         be_cq_notify(adapter, rxo->cq.id, false, 0);
2319         }
2320
2321         if (msix_enabled(adapter)) {
2322                 vec = be_msix_vec_get(adapter, tx_eq);
2323                 synchronize_irq(vec);
2324
2325                 for_all_rx_queues(adapter, rxo, i) {
2326                         vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2327                         synchronize_irq(vec);
2328                 }
2329         } else {
2330                 synchronize_irq(netdev->irq);
2331         }
2332         be_irq_unregister(adapter);
2333
2334         /* Wait for all pending tx completions to arrive so that
2335          * all tx skbs are freed.
2336          */
2337         be_tx_compl_clean(adapter);
2338
2339         return 0;
2340 }
2341
2342 static int be_open(struct net_device *netdev)
2343 {
2344         struct be_adapter *adapter = netdev_priv(netdev);
2345         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2346         struct be_rx_obj *rxo;
2347         bool link_up;
2348         int status, i;
2349         u8 mac_speed;
2350         u16 link_speed;
2351
2352         for_all_rx_queues(adapter, rxo, i) {
2353                 be_post_rx_frags(rxo, GFP_KERNEL);
2354                 napi_enable(&rxo->rx_eq.napi);
2355         }
2356         napi_enable(&tx_eq->napi);
2357
2358         be_irq_register(adapter);
2359
2360         if (!lancer_chip(adapter))
2361                 be_intr_set(adapter, true);
2362
2363         /* The evt queues are created in unarmed state; arm them */
2364         for_all_rx_queues(adapter, rxo, i) {
2365                 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2366                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2367         }
2368         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2369
2370         /* Now that interrupts are on we can process async mcc */
2371         be_async_mcc_enable(adapter);
2372
2373         status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2374                         &link_speed, 0);
2375         if (status)
2376                 goto err;
2377         be_link_status_update(adapter, link_up);
2378
2379         if (be_physfn(adapter)) {
2380                 status = be_vid_config(adapter, false, 0);
2381                 if (status)
2382                         goto err;
2383
2384                 status = be_cmd_set_flow_control(adapter,
2385                                 adapter->tx_fc, adapter->rx_fc);
2386                 if (status)
2387                         goto err;
2388         }
2389
2390         return 0;
2391 err:
2392         be_close(adapter->netdev);
2393         return -EIO;
2394 }
2395
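/* Enable or disable Wake-on-LAN via the magic-WOL mailbox command (a zeroed
 * MAC disables it). Also toggles PME wake capability for the D3hot and
 * D3cold states and, on enable, sets the PM control bits in PCI config
 * space.
 */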
2396 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2397 {
2398         struct be_dma_mem cmd;
2399         int status = 0;
2400         u8 mac[ETH_ALEN];
2401
2402         memset(mac, 0, ETH_ALEN);
2403
2404         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2405         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2406                                     GFP_KERNEL);
2407         if (cmd.va == NULL)
2408                 return -1;
2409         memset(cmd.va, 0, cmd.size);
2410
2411         if (enable) {
2412                 status = pci_write_config_dword(adapter->pdev,
2413                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2414                 if (status) {
2415                         dev_err(&adapter->pdev->dev,
2416                                 "Could not enable Wake-on-lan\n");
2417                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2418                                           cmd.dma);
2419                         return status;
2420                 }
2421                 status = be_cmd_enable_magic_wol(adapter,
2422                                 adapter->netdev->dev_addr, &cmd);
2423                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2424                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2425         } else {
2426                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2427                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2428                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2429         }
2430
2431         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2432         return status;
2433 }
2434
2435 /*
2436  * Generate a seed MAC address from the PF MAC Address using jhash.
2437  * MAC addresses for VFs are assigned incrementally starting from the seed.
2438  * These addresses are programmed in the ASIC by the PF and the VF driver
2439  * queries for the MAC address during its probe.
2440  */
2441 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2442 {
2443         u32 vf = 0;
2444         int status = 0;
2445         u8 mac[ETH_ALEN];
2446
2447         be_vf_eth_addr_generate(adapter, mac);
2448
2449         for (vf = 0; vf < num_vfs; vf++) {
2450                 status = be_cmd_pmac_add(adapter, mac,
2451                                         adapter->vf_cfg[vf].vf_if_handle,
2452                                         &adapter->vf_cfg[vf].vf_pmac_id,
2453                                         vf + 1);
2454                 if (status)
2455                         dev_err(&adapter->pdev->dev,
2456                                 "Mac address add failed for VF %d\n", vf);
2457                 else
2458                         memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2459
2460                 mac[5] += 1;
2461         }
2462         return status;
2463 }
2464
2465 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2466 {
2467         u32 vf;
2468
2469         for (vf = 0; vf < num_vfs; vf++) {
2470                 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2471                         be_cmd_pmac_del(adapter,
2472                                         adapter->vf_cfg[vf].vf_if_handle,
2473                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2474         }
2475 }
2476
2477 static int be_setup(struct be_adapter *adapter)
2478 {
2479         struct net_device *netdev = adapter->netdev;
2480         u32 cap_flags, en_flags, vf = 0;
2481         int status;
2482         u8 mac[ETH_ALEN];
2483
2484         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2485                                 BE_IF_FLAGS_BROADCAST |
2486                                 BE_IF_FLAGS_MULTICAST;
2487
2488         if (be_physfn(adapter)) {
2489                 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2490                                 BE_IF_FLAGS_PROMISCUOUS |
2491                                 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2492                 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2493
2494                 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2495                         cap_flags |= BE_IF_FLAGS_RSS;
2496                         en_flags |= BE_IF_FLAGS_RSS;
2497                 }
2498         }
2499
2500         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2501                         netdev->dev_addr, false/* pmac_invalid */,
2502                         &adapter->if_handle, &adapter->pmac_id, 0);
2503         if (status != 0)
2504                 goto do_none;
2505
2506         if (be_physfn(adapter)) {
2507                 if (adapter->sriov_enabled) {
2508                         while (vf < num_vfs) {
2509                                 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2510                                                         BE_IF_FLAGS_BROADCAST;
2511                                 status = be_cmd_if_create(adapter, cap_flags,
2512                                         en_flags, mac, true,
2513                                         &adapter->vf_cfg[vf].vf_if_handle,
2514                                         NULL, vf+1);
2515                                 if (status) {
2516                                         dev_err(&adapter->pdev->dev,
2517                                         "Interface Create failed for VF %d\n",
2518                                         vf);
2519                                         goto if_destroy;
2520                                 }
2521                                 adapter->vf_cfg[vf].vf_pmac_id =
2522                                                         BE_INVALID_PMAC_ID;
2523                                 vf++;
2524                         }
2525                 }
2526         } else {
2527                 status = be_cmd_mac_addr_query(adapter, mac,
2528                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2529                 if (!status) {
2530                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2531                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2532                 }
2533         }
2534
2535         status = be_tx_queues_create(adapter);
2536         if (status != 0)
2537                 goto if_destroy;
2538
2539         status = be_rx_queues_create(adapter);
2540         if (status != 0)
2541                 goto tx_qs_destroy;
2542
2543         status = be_mcc_queues_create(adapter);
2544         if (status != 0)
2545                 goto rx_qs_destroy;
2546
2547         adapter->link_speed = -1;
2548
2549         return 0;
2550
2551 rx_qs_destroy:
2552         be_rx_queues_destroy(adapter);
2553 tx_qs_destroy:
2554         be_tx_queues_destroy(adapter);
2555 if_destroy:
2556         if (be_physfn(adapter) && adapter->sriov_enabled)
2557                 for (vf = 0; vf < num_vfs; vf++)
2558                         if (adapter->vf_cfg[vf].vf_if_handle)
2559                                 be_cmd_if_destroy(adapter,
2560                                         adapter->vf_cfg[vf].vf_if_handle,
2561                                         vf + 1);
2562         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2563 do_none:
2564         return status;
2565 }
2566
2567 static int be_clear(struct be_adapter *adapter)
2568 {
2569         int vf;
2570
2571         if (be_physfn(adapter) && adapter->sriov_enabled)
2572                 be_vf_eth_addr_rem(adapter);
2573
2574         be_mcc_queues_destroy(adapter);
2575         be_rx_queues_destroy(adapter);
2576         be_tx_queues_destroy(adapter);
2577         adapter->eq_next_idx = 0;
2578
2579         if (be_physfn(adapter) && adapter->sriov_enabled)
2580                 for (vf = 0; vf < num_vfs; vf++)
2581                         if (adapter->vf_cfg[vf].vf_if_handle)
2582                                 be_cmd_if_destroy(adapter,
2583                                         adapter->vf_cfg[vf].vf_if_handle,
2584                                         vf + 1);
2585
2586         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2587
2588         /* tell fw we're done with firing cmds */
2589         be_cmd_fw_clean(adapter);
2590         return 0;
2591 }
2592
2593
2594 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2595 static bool be_flash_redboot(struct be_adapter *adapter,
2596                         const u8 *p, u32 img_start, int image_size,
2597                         int hdr_size)
2598 {
2599         u32 crc_offset;
2600         u8 flashed_crc[4];
2601         int status;
2602
2603         crc_offset = hdr_size + img_start + image_size - 4;
2604
2605         p += crc_offset;
2606
2607         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2608                         (image_size - 4));
2609         if (status) {
2610                 dev_err(&adapter->pdev->dev,
2611                 "could not get crc from flash, not flashing redboot\n");
2612                 return false;
2613         }
2614
2615         /* Update redboot only if the crc does not match */
2616         return memcmp(flashed_crc, p, 4) != 0;
2620 }
2621
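/* Walk the per-generation flash layout table and write each firmware
 * component to flash in 32KB chunks: intermediate chunks use
 * FLASHROM_OPER_SAVE and the final chunk FLASHROM_OPER_FLASH. NCSI images
 * are skipped when the adapter's FW is older than 3.102.148.0, and redboot
 * is skipped when its CRC already matches the image.
 */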
2622 static int be_flash_data(struct be_adapter *adapter,
2623                         const struct firmware *fw,
2624                         struct be_dma_mem *flash_cmd, int num_of_images)
2625
2626 {
2627         int status = 0, i, filehdr_size = 0;
2628         u32 total_bytes = 0, flash_op;
2629         int num_bytes;
2630         const u8 *p = fw->data;
2631         struct be_cmd_write_flashrom *req = flash_cmd->va;
2632         const struct flash_comp *pflashcomp;
2633         int num_comp;
2634
2635         static const struct flash_comp gen3_flash_types[9] = {
2636                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2637                         FLASH_IMAGE_MAX_SIZE_g3},
2638                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2639                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2640                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2641                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2642                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2643                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2644                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2645                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2646                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2647                         FLASH_IMAGE_MAX_SIZE_g3},
2648                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2649                         FLASH_IMAGE_MAX_SIZE_g3},
2650                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2651                         FLASH_IMAGE_MAX_SIZE_g3},
2652                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2653                         FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2654         };
2655         static const struct flash_comp gen2_flash_types[8] = {
2656                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2657                         FLASH_IMAGE_MAX_SIZE_g2},
2658                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2659                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2660                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2661                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2662                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2663                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2664                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2665                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2666                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2667                         FLASH_IMAGE_MAX_SIZE_g2},
2668                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2669                         FLASH_IMAGE_MAX_SIZE_g2},
2670                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2671                          FLASH_IMAGE_MAX_SIZE_g2}
2672         };
2673
2674         if (adapter->generation == BE_GEN3) {
2675                 pflashcomp = gen3_flash_types;
2676                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2677                 num_comp = ARRAY_SIZE(gen3_flash_types);
2678         } else {
2679                 pflashcomp = gen2_flash_types;
2680                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2681                 num_comp = ARRAY_SIZE(gen2_flash_types);
2682         }
2683         for (i = 0; i < num_comp; i++) {
2684                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2685                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2686                         continue;
2687                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2688                         (!be_flash_redboot(adapter, fw->data,
2689                         pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2690                         (num_of_images * sizeof(struct image_hdr)))))
2691                         continue;
2692                 p = fw->data;
2693                 p += filehdr_size + pflashcomp[i].offset
2694                         + (num_of_images * sizeof(struct image_hdr));
2695                 if (p + pflashcomp[i].size > fw->data + fw->size)
2696                         return -1;
2697                 total_bytes = pflashcomp[i].size;
2698                 while (total_bytes) {
2699                         if (total_bytes > 32*1024)
2700                                 num_bytes = 32*1024;
2701                         else
2702                                 num_bytes = total_bytes;
2703                         total_bytes -= num_bytes;
2704
2705                         if (!total_bytes)
2706                                 flash_op = FLASHROM_OPER_FLASH;
2707                         else
2708                                 flash_op = FLASHROM_OPER_SAVE;
2709                         memcpy(req->params.data_buf, p, num_bytes);
2710                         p += num_bytes;
2711                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2712                                 pflashcomp[i].optype, flash_op, num_bytes);
2713                         if (status) {
2714                                 dev_err(&adapter->pdev->dev,
2715                                         "cmd to write to flash rom failed.\n");
2716                                 return -1;
2717                         }
2718                 }
2719         }
2720         return 0;
2721 }
2722
2723 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2724 {
2725         if (fhdr == NULL)
2726                 return 0;
2727         if (fhdr->build[0] == '3')
2728                 return BE_GEN3;
2729         else if (fhdr->build[0] == '2')
2730                 return BE_GEN2;
2731         else
2732                 return 0;
2733 }
2734
2735 static int lancer_fw_download(struct be_adapter *adapter,
2736                                 const struct firmware *fw)
2737 {
2738 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
2739 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
2740         struct be_dma_mem flash_cmd;
2741         const u8 *data_ptr = NULL;
2742         u8 *dest_image_ptr = NULL;
2743         size_t image_size = 0;
2744         u32 chunk_size = 0;
2745         u32 data_written = 0;
2746         u32 offset = 0;
2747         int status = 0;
2748         u8 add_status = 0;
2749
2750         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2751                 dev_err(&adapter->pdev->dev,
2752                         "FW Image not properly aligned. "
2753                         "Length must be 4-byte aligned.\n");
2754                 status = -EINVAL;
2755                 goto lancer_fw_exit;
2756         }
2757
2758         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2759                                 + LANCER_FW_DOWNLOAD_CHUNK;
2760         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2761                                                 &flash_cmd.dma, GFP_KERNEL);
2762         if (!flash_cmd.va) {
2763                 status = -ENOMEM;
2764                 dev_err(&adapter->pdev->dev,
2765                         "Memory allocation failure while flashing\n");
2766                 goto lancer_fw_exit;
2767         }
2768
2769         dest_image_ptr = flash_cmd.va +
2770                                 sizeof(struct lancer_cmd_req_write_object);
2771         image_size = fw->size;
2772         data_ptr = fw->data;
2773
2774         while (image_size) {
2775                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2776
2777                 /* Copy the image chunk content. */
2778                 memcpy(dest_image_ptr, data_ptr, chunk_size);
2779
2780                 status = lancer_cmd_write_object(adapter, &flash_cmd,
2781                                 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2782                                 &data_written, &add_status);
2783
2784                 if (status)
2785                         break;
2786
2787                 offset += data_written;
2788                 data_ptr += data_written;
2789                 image_size -= data_written;
2790         }
2791
2792         if (!status) {
2793                 /* Commit the FW written */
2794                 status = lancer_cmd_write_object(adapter, &flash_cmd,
2795                                         0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2796                                         &data_written, &add_status);
2797         }
2798
2799         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2800                                 flash_cmd.dma);
2801         if (status) {
2802                 dev_err(&adapter->pdev->dev,
2803                         "Firmware load error. "
2804                         "Status code: 0x%x Additional Status: 0x%x\n",
2805                         status, add_status);
2806                 goto lancer_fw_exit;
2807         }
2808
2809         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2810 lancer_fw_exit:
2811         return status;
2812 }
2813
2814 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2815 {
2816         struct flash_file_hdr_g2 *fhdr;
2817         struct flash_file_hdr_g3 *fhdr3;
2818         struct image_hdr *img_hdr_ptr = NULL;
2819         struct be_dma_mem flash_cmd;
2820         const u8 *p;
2821         int status = 0, i = 0, num_imgs = 0;
2822
2823         p = fw->data;
2824         fhdr = (struct flash_file_hdr_g2 *) p;
2825
2826         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2827         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2828                                           &flash_cmd.dma, GFP_KERNEL);
2829         if (!flash_cmd.va) {
2830                 status = -ENOMEM;
2831                 dev_err(&adapter->pdev->dev,
2832                         "Memory allocation failure while flashing\n");
2833                 goto be_fw_exit;
2834         }
2835
2836         if ((adapter->generation == BE_GEN3) &&
2837                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2838                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2839                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2840                 for (i = 0; i < num_imgs; i++) {
2841                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2842                                         (sizeof(struct flash_file_hdr_g3) +
2843                                          i * sizeof(struct image_hdr)));
2844                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2845                                 status = be_flash_data(adapter, fw, &flash_cmd,
2846                                                         num_imgs);
2847                 }
2848         } else if ((adapter->generation == BE_GEN2) &&
2849                         (get_ufigen_type(fhdr) == BE_GEN2)) {
2850                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2851         } else {
2852                 dev_err(&adapter->pdev->dev,
2853                         "UFI and Interface are not compatible for flashing\n");
2854                 status = -EINVAL;
2855         }
2856
2857         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2858                           flash_cmd.dma);
2859         if (status) {
2860                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2861                 goto be_fw_exit;
2862         }
2863
2864         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2865
2866 be_fw_exit:
2867         return status;
2868 }
2869
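/* Entry point for firmware flashing, dispatching to the Lancer or BE2/BE3
 * download path by chip family. It is reached from the ethtool flash-device
 * operation (e.g. "ethtool -f <iface> <file>"), assuming the standard
 * ethtool flashing hook in be_ethtool.c.
 */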
2870 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2871 {
2872         const struct firmware *fw;
2873         int status;
2874
2875         if (!netif_running(adapter->netdev)) {
2876                 dev_err(&adapter->pdev->dev,
2877                         "Firmware load not allowed (interface is down)\n");
2878                 return -ENETDOWN;
2879         }
2880
2881         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2882         if (status)
2883                 goto fw_exit;
2884
2885         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2886
2887         if (lancer_chip(adapter))
2888                 status = lancer_fw_download(adapter, fw);
2889         else
2890                 status = be_fw_download(adapter, fw);
2891
2892 fw_exit:
2893         release_firmware(fw);
2894         return status;
2895 }
2896
2897 static const struct net_device_ops be_netdev_ops = {
2898         .ndo_open               = be_open,
2899         .ndo_stop               = be_close,
2900         .ndo_start_xmit         = be_xmit,
2901         .ndo_set_rx_mode        = be_set_multicast_list,
2902         .ndo_set_mac_address    = be_mac_addr_set,
2903         .ndo_change_mtu         = be_change_mtu,
2904         .ndo_validate_addr      = eth_validate_addr,
2905         .ndo_vlan_rx_register   = be_vlan_register,
2906         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
2907         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
2908         .ndo_set_vf_mac         = be_set_vf_mac,
2909         .ndo_set_vf_vlan        = be_set_vf_vlan,
2910         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
2911         .ndo_get_vf_config      = be_get_vf_config
2912 };
2913
2914 static void be_netdev_init(struct net_device *netdev)
2915 {
2916         struct be_adapter *adapter = netdev_priv(netdev);
2917         struct be_rx_obj *rxo;
2918         int i;
2919
2920         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2921                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2922                 NETIF_F_HW_VLAN_TX;
2923         if (be_multi_rxq(adapter))
2924                 netdev->hw_features |= NETIF_F_RXHASH;
2925
2926         netdev->features |= netdev->hw_features |
2927                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
2928
2929         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
2930                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2931
2932         if (lancer_chip(adapter))
2933                 netdev->vlan_features |= NETIF_F_TSO6;
2934
2935         netdev->flags |= IFF_MULTICAST;
2936
2937         /* Default settings for Rx and Tx flow control */
2938         adapter->rx_fc = true;
2939         adapter->tx_fc = true;
2940
2941         netif_set_gso_max_size(netdev, 65535);
2942
2943         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2944
2945         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2946
2947         for_all_rx_queues(adapter, rxo, i)
2948                 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2949                                 BE_NAPI_WEIGHT);
2950
2951         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2952                 BE_NAPI_WEIGHT);
2953 }
2954
2955 static void be_unmap_pci_bars(struct be_adapter *adapter)
2956 {
2957         if (adapter->csr)
2958                 iounmap(adapter->csr);
2959         if (adapter->db)
2960                 iounmap(adapter->db);
2961         if (adapter->pcicfg && be_physfn(adapter))
2962                 iounmap(adapter->pcicfg);
2963 }
2964
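/* Map the PCI BARs this driver uses. Lancer needs only the doorbell area in
 * BAR 0. On BE2/BE3 the PF also maps the CSR area from BAR 2; which BARs
 * hold the doorbell and pcicfg regions depends on the chip generation and
 * on whether the function is a PF or a VF, as encoded below.
 */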
2965 static int be_map_pci_bars(struct be_adapter *adapter)
2966 {
2967         u8 __iomem *addr;
2968         int pcicfg_reg, db_reg;
2969
2970         if (lancer_chip(adapter)) {
2971                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2972                         pci_resource_len(adapter->pdev, 0));
2973                 if (addr == NULL)
2974                         return -ENOMEM;
2975                 adapter->db = addr;
2976                 return 0;
2977         }
2978
2979         if (be_physfn(adapter)) {
2980                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2981                                 pci_resource_len(adapter->pdev, 2));
2982                 if (addr == NULL)
2983                         return -ENOMEM;
2984                 adapter->csr = addr;
2985         }
2986
2987         if (adapter->generation == BE_GEN2) {
2988                 pcicfg_reg = 1;
2989                 db_reg = 4;
2990         } else {
2991                 pcicfg_reg = 0;
2992                 if (be_physfn(adapter))
2993                         db_reg = 4;
2994                 else
2995                         db_reg = 0;
2996         }
2997         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2998                                 pci_resource_len(adapter->pdev, db_reg));
2999         if (addr == NULL)
3000                 goto pci_map_err;
3001         adapter->db = addr;
3002
3003         if (be_physfn(adapter)) {
3004                 addr = ioremap_nocache(
3005                                 pci_resource_start(adapter->pdev, pcicfg_reg),
3006                                 pci_resource_len(adapter->pdev, pcicfg_reg));
3007                 if (addr == NULL)
3008                         goto pci_map_err;
3009                 adapter->pcicfg = addr;
3010         } else
3011                 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
3012
3013         return 0;
3014 pci_map_err:
3015         be_unmap_pci_bars(adapter);
3016         return -ENOMEM;
3017 }
3018
3019
3020 static void be_ctrl_cleanup(struct be_adapter *adapter)
3021 {
3022         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3023
3024         be_unmap_pci_bars(adapter);
3025
3026         if (mem->va)
3027                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3028                                   mem->dma);
3029
3030         mem = &adapter->mc_cmd_mem;
3031         if (mem->va)
3032                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3033                                   mem->dma);
3034 }
3035
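/* Mailbox (bootstrap) commands need a 16-byte aligned buffer. Rather than
 * rely on allocator alignment, 16 extra bytes are allocated and both the
 * CPU and DMA addresses are rounded up with PTR_ALIGN(); mbox_mem_alloced
 * keeps the original values for the eventual dma_free_coherent().
 */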
3036 static int be_ctrl_init(struct be_adapter *adapter)
3037 {
3038         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3039         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3040         struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
3041         int status;
3042
3043         status = be_map_pci_bars(adapter);
3044         if (status)
3045                 goto done;
3046
3047         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3048         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3049                                                 mbox_mem_alloc->size,
3050                                                 &mbox_mem_alloc->dma,
3051                                                 GFP_KERNEL);
3052         if (!mbox_mem_alloc->va) {
3053                 status = -ENOMEM;
3054                 goto unmap_pci_bars;
3055         }
3056
3057         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3058         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3059         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3060         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3061
3062         mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
3063         mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
3064                                             mc_cmd_mem->size, &mc_cmd_mem->dma,
3065                                             GFP_KERNEL);
3066         if (mc_cmd_mem->va == NULL) {
3067                 status = -ENOMEM;
3068                 goto free_mbox;
3069         }
3070         memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
3071
3072         mutex_init(&adapter->mbox_lock);
3073         spin_lock_init(&adapter->mcc_lock);
3074         spin_lock_init(&adapter->mcc_cq_lock);
3075
3076         init_completion(&adapter->flash_compl);
3077         pci_save_state(adapter->pdev);
3078         return 0;
3079
3080 free_mbox:
3081         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3082                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3083
3084 unmap_pci_bars:
3085         be_unmap_pci_bars(adapter);
3086
3087 done:
3088         return status;
3089 }
3090
3091 static void be_stats_cleanup(struct be_adapter *adapter)
3092 {
3093         struct be_dma_mem *cmd = &adapter->stats_cmd;
3094
3095         if (cmd->va)
3096                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3097                                   cmd->va, cmd->dma);
3098 }
3099
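/* Size the stats command buffer per chip: Gen2 uses the v0 GET_STATS
 * request, Lancer its pport-stats request, and other Gen3 chips the v1
 * request.
 */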
3100 static int be_stats_init(struct be_adapter *adapter)
3101 {
3102         struct be_dma_mem *cmd = &adapter->stats_cmd;
3103
3104         if (adapter->generation == BE_GEN2) {
3105                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3106         } else {
3107                 if (lancer_chip(adapter))
3108                         cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3109                 else
3110                         cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3111         }
3112         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3113                                      GFP_KERNEL);
3114         if (cmd->va == NULL)
3115                 return -ENOMEM;
3116         memset(cmd->va, 0, cmd->size);
3117         return 0;
3118 }
3119
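/* Device teardown mirrors the probe order: stop the worker, unregister the
 * netdev, tear down rings (be_clear), free the stats and control DMA
 * memory, then release SR-IOV, MSI-X and the PCI resources.
 */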
3120 static void __devexit be_remove(struct pci_dev *pdev)
3121 {
3122         struct be_adapter *adapter = pci_get_drvdata(pdev);
3123
3124         if (!adapter)
3125                 return;
3126
3127         cancel_delayed_work_sync(&adapter->work);
3128
3129         unregister_netdev(adapter->netdev);
3130
3131         be_clear(adapter);
3132
3133         be_stats_cleanup(adapter);
3134
3135         be_ctrl_cleanup(adapter);
3136
3137         kfree(adapter->vf_cfg);
3138         be_sriov_disable(adapter);
3139
3140         be_msix_disable(adapter);
3141
3142         pci_set_drvdata(pdev, NULL);
3143         pci_release_regions(pdev);
3144         pci_disable_device(pdev);
3145
3146         free_netdev(adapter->netdev);
3147 }
3148
3149 static int be_get_config(struct be_adapter *adapter)
3150 {
3151         int status;
3152         u8 mac[ETH_ALEN];
3153
3154         status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
3155         if (status)
3156                 return status;
3157
3158         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3159                         &adapter->function_mode, &adapter->function_caps);
3160         if (status)
3161                 return status;
3162
3163         memset(mac, 0, ETH_ALEN);
3164
3165         /* A default permanent address is given to each VF for Lancer */
3166         if (be_physfn(adapter) || lancer_chip(adapter)) {
3167                 status = be_cmd_mac_addr_query(adapter, mac,
3168                         MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);
3169
3170                 if (status)
3171                         return status;
3172
3173                 if (!is_valid_ether_addr(mac))
3174                         return -EADDRNOTAVAIL;
3175
3176                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3177                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3178         }
3179
3180         if (adapter->function_mode & 0x400)     /* FLEX10 (multi-channel) mode */
3181                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3182         else
3183                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3184
3185         status = be_cmd_get_cntl_attributes(adapter);
3186         if (status)
3187                 return status;
3188
3189         be_cmd_check_native_mode(adapter);
3190         return 0;
3191 }
3192
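/* Derive the adapter generation from the PCI device ID: BE_DEVICE_ID1/
 * OC_DEVICE_ID1 are Gen2, BE_DEVICE_ID2/OC_DEVICE_ID2 are Gen3. The Lancer
 * parts (OC_DEVICE_ID3/4) are validated via the SLI_INTF register, which
 * must read back as valid with interface type 0x02.
 */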
3193 static int be_dev_family_check(struct be_adapter *adapter)
3194 {
3195         struct pci_dev *pdev = adapter->pdev;
3196         u32 sli_intf = 0, if_type;
3197
3198         switch (pdev->device) {
3199         case BE_DEVICE_ID1:
3200         case OC_DEVICE_ID1:
3201                 adapter->generation = BE_GEN2;
3202                 break;
3203         case BE_DEVICE_ID2:
3204         case OC_DEVICE_ID2:
3205                 adapter->generation = BE_GEN3;
3206                 break;
3207         case OC_DEVICE_ID3:
3208         case OC_DEVICE_ID4:
3209                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3210                 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3211                                                 SLI_INTF_IF_TYPE_SHIFT;
3212
3213                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3214                         if_type != 0x02) {
3215                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3216                         return -EINVAL;
3217                 }
3218                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3219                                          SLI_INTF_FAMILY_SHIFT);
3220                 adapter->generation = BE_GEN3;
3221                 break;
3222         default:
3223                 adapter->generation = 0;
3224         }
3225         return 0;
3226 }
3227
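/* Poll the ready bit in SLIPORT_STATUS. With 500 iterations of msleep(20)
 * this waits up to roughly 500 * 20 ms = 10 seconds before giving up.
 */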
3228 static int lancer_wait_ready(struct be_adapter *adapter)
3229 {
3230 #define SLIPORT_READY_TIMEOUT 500
3231         u32 sliport_status;
3232         int status = 0, i;
3233
3234         for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3235                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3236                 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3237                         break;
3238
3239                 msleep(20);
3240         }
3241
3242         if (i == SLIPORT_READY_TIMEOUT)
3243                 status = -1;
3244
3245         return status;
3246 }
3247
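/* Drive a Lancer function to the ready state. If the port reports an error
 * together with "reset needed", writing SLI_PORT_CONTROL_IP_MASK to
 * SLIPORT_CONTROL requests a port reset and the ready poll is retried; an
 * error without the reset-needed bit is treated as unrecoverable.
 */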
3248 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3249 {
3250         int status;
3251         u32 sliport_status, err, reset_needed;
3252         status = lancer_wait_ready(adapter);
3253         if (!status) {
3254                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3255                 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3256                 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3257                 if (err && reset_needed) {
3258                         iowrite32(SLI_PORT_CONTROL_IP_MASK,
3259                                         adapter->db + SLIPORT_CONTROL_OFFSET);
3260
3261                         /* check if the adapter has corrected the error */
3262                         status = lancer_wait_ready(adapter);
3263                         sliport_status = ioread32(adapter->db +
3264                                                         SLIPORT_STATUS_OFFSET);
3265                         sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3266                                                 SLIPORT_STATUS_RN_MASK);
3267                         if (status || sliport_status)
3268                                 status = -1;
3269                 } else if (err || reset_needed) {
3270                         status = -1;
3271                 }
3272         }
3273         return status;
3274 }
3275
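/* PCI probe: enable the device and BARs, allocate the netdev, set the DMA
 * mask, bring up SR-IOV and the bootstrap mailbox (be_ctrl_init), sync with
 * firmware POST, issue fw_init and reset_function, then set up stats,
 * configuration, MSI-X and the rings (be_setup) before registering the
 * netdev. The error labels below unwind in exactly this order.
 */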
3276 static int __devinit be_probe(struct pci_dev *pdev,
3277                         const struct pci_device_id *pdev_id)
3278 {
3279         int status = 0;
3280         struct be_adapter *adapter;
3281         struct net_device *netdev;
3282
3283         status = pci_enable_device(pdev);
3284         if (status)
3285                 goto do_none;
3286
3287         status = pci_request_regions(pdev, DRV_NAME);
3288         if (status)
3289                 goto disable_dev;
3290         pci_set_master(pdev);
3291
3292         netdev = alloc_etherdev(sizeof(struct be_adapter));
3293         if (netdev == NULL) {
3294                 status = -ENOMEM;
3295                 goto rel_reg;
3296         }
3297         adapter = netdev_priv(netdev);
3298         adapter->pdev = pdev;
3299         pci_set_drvdata(pdev, adapter);
3300
3301         status = be_dev_family_check(adapter);
3302         if (status)
3303                 goto free_netdev;
3304
3305         adapter->netdev = netdev;
3306         SET_NETDEV_DEV(netdev, &pdev->dev);
3307
3308         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3309         if (!status) {
3310                 netdev->features |= NETIF_F_HIGHDMA;
3311         } else {
3312                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3313                 if (status) {
3314                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3315                         goto free_netdev;
3316                 }
3317         }
3318
3319         be_sriov_enable(adapter);
3320         if (adapter->sriov_enabled) {
3321                 adapter->vf_cfg = kcalloc(num_vfs,
3322                         sizeof(struct be_vf_cfg), GFP_KERNEL);
3323
3324                 if (!adapter->vf_cfg)
3325                         goto free_netdev;
3326         }
3327
3328         status = be_ctrl_init(adapter);
3329         if (status)
3330                 goto free_vf_cfg;
3331
3332         if (lancer_chip(adapter)) {
3333                 status = lancer_test_and_set_rdy_state(adapter);
3334                 if (status) {
3335                         dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
3336                         goto ctrl_clean;
3337                 }
3338         }
3339
3340         /* sync up with fw's ready state */
3341         if (be_physfn(adapter)) {
3342                 status = be_cmd_POST(adapter);
3343                 if (status)
3344                         goto ctrl_clean;
3345         }
3346
3347         /* tell fw we're ready to fire cmds */
3348         status = be_cmd_fw_init(adapter);
3349         if (status)
3350                 goto ctrl_clean;
3351
3352         status = be_cmd_reset_function(adapter);
3353         if (status)
3354                 goto ctrl_clean;
3355
3356         status = be_stats_init(adapter);
3357         if (status)
3358                 goto ctrl_clean;
3359
3360         status = be_get_config(adapter);
3361         if (status)
3362                 goto stats_clean;
3363
3364         be_msix_enable(adapter);
3365
3366         INIT_DELAYED_WORK(&adapter->work, be_worker);
3367
3368         status = be_setup(adapter);
3369         if (status)
3370                 goto msix_disable;
3371
3372         be_netdev_init(netdev);
3373         status = register_netdev(netdev);
3374         if (status)
3375                 goto unsetup;
3376         netif_carrier_off(netdev);
3377
3378         if (be_physfn(adapter) && adapter->sriov_enabled) {
3379                 u8 mac_speed;
3380                 bool link_up;
3381                 u16 vf, lnk_speed;
3382
3383                 if (!lancer_chip(adapter)) {
3384                         status = be_vf_eth_addr_config(adapter);
3385                         if (status)
3386                                 goto unreg_netdev;
3387                 }
3388
3389                 for (vf = 0; vf < num_vfs; vf++) {
3390                         status = be_cmd_link_status_query(adapter, &link_up,
3391                                         &mac_speed, &lnk_speed, vf + 1);
3392                         if (!status)
3393                                 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
3394                         else
3395                                 goto unreg_netdev;
3396                 }
3397         }
3398
3399         dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3400         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3401         return 0;
3402
3403 unreg_netdev:
3404         unregister_netdev(netdev);
3405 unsetup:
3406         be_clear(adapter);
3407 msix_disable:
3408         be_msix_disable(adapter);
3409 stats_clean:
3410         be_stats_cleanup(adapter);
3411 ctrl_clean:
3412         be_ctrl_cleanup(adapter);
3413 free_vf_cfg:
3414         kfree(adapter->vf_cfg);
3415 free_netdev:
3416         be_sriov_disable(adapter);
3417         free_netdev(netdev);
3418         pci_set_drvdata(pdev, NULL);
3419 rel_reg:
3420         pci_release_regions(pdev);
3421 disable_dev:
3422         pci_disable_device(pdev);
3423 do_none:
3424         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3425         return status;
3426 }
3427
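/* Legacy PM suspend: stop the worker, arm wake-on-LAN if configured, close
 * the interface under rtnl, record the current flow-control settings, tear
 * down rings and MSI-X, then put the device into the requested power state.
 */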
3428 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3429 {
3430         struct be_adapter *adapter = pci_get_drvdata(pdev);
3431         struct net_device *netdev = adapter->netdev;
3432
3433         cancel_delayed_work_sync(&adapter->work);
3434         if (adapter->wol)
3435                 be_setup_wol(adapter, true);
3436
3437         netif_device_detach(netdev);
3438         if (netif_running(netdev)) {
3439                 rtnl_lock();
3440                 be_close(netdev);
3441                 rtnl_unlock();
3442         }
3443         be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
3444         be_clear(adapter);
3445
3446         be_msix_disable(adapter);
3447         pci_save_state(pdev);
3448         pci_disable_device(pdev);
3449         pci_set_power_state(pdev, pci_choose_state(pdev, state));
3450         return 0;
3451 }
3452
3453 static int be_resume(struct pci_dev *pdev)
3454 {
3455         int status = 0;
3456         struct be_adapter *adapter = pci_get_drvdata(pdev);
3457         struct net_device *netdev = adapter->netdev;
3458
3459         netif_device_detach(netdev);
3460
3461         status = pci_enable_device(pdev);
3462         if (status)
3463                 return status;
3464
3465         pci_set_power_state(pdev, PCI_D0);
3466         pci_restore_state(pdev);
3467
3468         be_msix_enable(adapter);
3469         /* tell fw we're ready to fire cmds */
3470         status = be_cmd_fw_init(adapter);
3471         if (status)
3472                 return status;
3473
3474         be_setup(adapter);
3475         if (netif_running(netdev)) {
3476                 rtnl_lock();
3477                 be_open(netdev);
3478                 rtnl_unlock();
3479         }
3480         netif_device_attach(netdev);
3481
3482         if (adapter->wol)
3483                 be_setup_wol(adapter, false);
3484
3485         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3486         return 0;
3487 }
3488
3489 /*
3490  * An FLR will stop BE from DMAing any data.
3491  */
3492 static void be_shutdown(struct pci_dev *pdev)
3493 {
3494         struct be_adapter *adapter = pci_get_drvdata(pdev);
3495
3496         if (!adapter)
3497                 return;
3498
3499         cancel_delayed_work_sync(&adapter->work);
3500
3501         netif_device_detach(adapter->netdev);
3502
3503         if (adapter->wol)
3504                 be_setup_wol(adapter, true);
3505
3506         be_cmd_reset_function(adapter);
3507
3508         pci_disable_device(pdev);
3509 }
3510
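/* EEH (PCI error recovery) callbacks: error_detected quiesces the interface
 * and says whether a slot reset may help, slot_reset re-enables the device
 * and re-runs POST, and resume re-initializes firmware state and the rings
 * before re-attaching the netdev.
 */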
3511 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3512                                 pci_channel_state_t state)
3513 {
3514         struct be_adapter *adapter = pci_get_drvdata(pdev);
3515         struct net_device *netdev = adapter->netdev;
3516
3517         dev_err(&adapter->pdev->dev, "EEH error detected\n");
3518
3519         adapter->eeh_err = true;
3520
3521         netif_device_detach(netdev);
3522
3523         if (netif_running(netdev)) {
3524                 rtnl_lock();
3525                 be_close(netdev);
3526                 rtnl_unlock();
3527         }
3528         be_clear(adapter);
3529
3530         if (state == pci_channel_io_perm_failure)
3531                 return PCI_ERS_RESULT_DISCONNECT;
3532
3533         pci_disable_device(pdev);
3534
3535         return PCI_ERS_RESULT_NEED_RESET;
3536 }
3537
3538 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3539 {
3540         struct be_adapter *adapter = pci_get_drvdata(pdev);
3541         int status;
3542
3543         dev_info(&adapter->pdev->dev, "EEH reset\n");
3544         adapter->eeh_err = false;
3545
3546         status = pci_enable_device(pdev);
3547         if (status)
3548                 return PCI_ERS_RESULT_DISCONNECT;
3549
3550         pci_set_master(pdev);
3551         pci_set_power_state(pdev, PCI_D0);
3552         pci_restore_state(pdev);
3553
3554         /* Check if card is ok and fw is ready */
3555         status = be_cmd_POST(adapter);
3556         if (status)
3557                 return PCI_ERS_RESULT_DISCONNECT;
3558
3559         return PCI_ERS_RESULT_RECOVERED;
3560 }
3561
3562 static void be_eeh_resume(struct pci_dev *pdev)
3563 {
3564         int status = 0;
3565         struct be_adapter *adapter = pci_get_drvdata(pdev);
3566         struct net_device *netdev = adapter->netdev;
3567
3568         dev_info(&adapter->pdev->dev, "EEH resume\n");
3569
3570         pci_save_state(pdev);
3571
3572         /* tell fw we're ready to fire cmds */
3573         status = be_cmd_fw_init(adapter);
3574         if (status)
3575                 goto err;
3576
3577         status = be_setup(adapter);
3578         if (status)
3579                 goto err;
3580
3581         if (netif_running(netdev)) {
3582                 status = be_open(netdev);
3583                 if (status)
3584                         goto err;
3585         }
3586         netif_device_attach(netdev);
3587         return;
3588 err:
3589         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3590 }
3591
3592 static struct pci_error_handlers be_eeh_handlers = {
3593         .error_detected = be_eeh_err_detected,
3594         .slot_reset = be_eeh_reset,
3595         .resume = be_eeh_resume,
3596 };
3597
3598 static struct pci_driver be_driver = {
3599         .name = DRV_NAME,
3600         .id_table = be_dev_ids,
3601         .probe = be_probe,
3602         .remove = be_remove,
3603         .suspend = be_suspend,
3604         .resume = be_resume,
3605         .shutdown = be_shutdown,
3606         .err_handler = &be_eeh_handlers
3607 };
3608
3609 static int __init be_init_module(void)
3610 {
3611         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3612             rx_frag_size != 2048) {
3613                 printk(KERN_WARNING DRV_NAME
3614                         " : Module param rx_frag_size must be 2048/4096/8192."
3615                         " Using 2048\n");
3616                 rx_frag_size = 2048;
3617         }
3618
3619         return pci_register_driver(&be_driver);
3620 }
3621 module_init(be_init_module);
3622
3623 static void __exit be_exit_module(void)
3624 {
3625         pci_unregister_driver(&be_driver);
3626 }
3627 module_exit(be_exit_module);