drivers/net/benet/be_main.c
1 /*
2  * Copyright (C) 2005 - 2011 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17
18 #include <linux/prefetch.h>
19 #include "be.h"
20 #include "be_cmds.h"
21 #include <asm/div64.h>
22
23 MODULE_VERSION(DRV_VER);
25 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
26 MODULE_AUTHOR("ServerEngines Corporation");
27 MODULE_LICENSE("GPL");
28
29 static ushort rx_frag_size = 2048;
30 static unsigned int num_vfs;
31 module_param(rx_frag_size, ushort, S_IRUGO);
32 module_param(num_vfs, uint, S_IRUGO);
33 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
34 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
35
36 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
37         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
38         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
39         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
40         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
41         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
42         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
43         { 0 }
44 };
45 MODULE_DEVICE_TABLE(pci, be_dev_ids);
46 /* UE Status Low CSR */
47 static const char * const ue_status_low_desc[] = {
48         "CEV",
49         "CTX",
50         "DBUF",
51         "ERX",
52         "Host",
53         "MPU",
54         "NDMA",
55         "PTC ",
56         "RDMA ",
57         "RXF ",
58         "RXIPS ",
59         "RXULP0 ",
60         "RXULP1 ",
61         "RXULP2 ",
62         "TIM ",
63         "TPOST ",
64         "TPRE ",
65         "TXIPS ",
66         "TXULP0 ",
67         "TXULP1 ",
68         "UC ",
69         "WDMA ",
70         "TXULP2 ",
71         "HOST1 ",
72         "P0_OB_LINK ",
73         "P1_OB_LINK ",
74         "HOST_GPIO ",
75         "MBOX ",
76         "AXGMAC0",
77         "AXGMAC1",
78         "JTAG",
79         "MPU_INTPEND"
80 };
81 /* UE Status High CSR */
82 static const char * const ue_status_hi_desc[] = {
83         "LPCMEMHOST",
84         "MGMT_MAC",
85         "PCS0ONLINE",
86         "MPU_IRAM",
87         "PCS1ONLINE",
88         "PCTL0",
89         "PCTL1",
90         "PMEM",
91         "RR",
92         "TXPB",
93         "RXPP",
94         "XAUI",
95         "TXP",
96         "ARM",
97         "IPC",
98         "HOST2",
99         "HOST3",
100         "HOST4",
101         "HOST5",
102         "HOST6",
103         "HOST7",
104         "HOST8",
105         "HOST9",
106         "NETC",
107         "Unknown",
108         "Unknown",
109         "Unknown",
110         "Unknown",
111         "Unknown",
112         "Unknown",
113         "Unknown",
114         "Unknown"
115 };
116
117 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
118 {
119         struct be_dma_mem *mem = &q->dma_mem;
120         if (mem->va)
121                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
122                                   mem->dma);
123 }
124
125 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
126                 u16 len, u16 entry_size)
127 {
128         struct be_dma_mem *mem = &q->dma_mem;
129
130         memset(q, 0, sizeof(*q));
131         q->len = len;
132         q->entry_size = entry_size;
133         mem->size = len * entry_size;
134         mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
135                                      GFP_KERNEL);
136         if (!mem->va)
137                 return -ENOMEM;
138         memset(mem->va, 0, mem->size);
139         return 0;
140 }
141
142 static void be_intr_set(struct be_adapter *adapter, bool enable)
143 {
144         u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
145         u32 reg = ioread32(addr);
146         u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
147
148         if (adapter->eeh_err)
149                 return;
150
151         if (!enabled && enable)
152                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
153         else if (enabled && !enable)
154                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
155         else
156                 return;
157
158         iowrite32(reg, addr);
159 }
160
161 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
162 {
163         u32 val = 0;
164         val |= qid & DB_RQ_RING_ID_MASK;
165         val |= posted << DB_RQ_NUM_POSTED_SHIFT;
166
167         wmb();
168         iowrite32(val, adapter->db + DB_RQ_OFFSET);
169 }
170
171 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
172 {
173         u32 val = 0;
174         val |= qid & DB_TXULP_RING_ID_MASK;
175         val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
176
177         wmb();
178         iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
179 }
180
181 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
182                 bool arm, bool clear_int, u16 num_popped)
183 {
184         u32 val = 0;
185         val |= qid & DB_EQ_RING_ID_MASK;
186         val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
187                         DB_EQ_RING_ID_EXT_MASK_SHIFT);
188
189         if (adapter->eeh_err)
190                 return;
191
192         if (arm)
193                 val |= 1 << DB_EQ_REARM_SHIFT;
194         if (clear_int)
195                 val |= 1 << DB_EQ_CLR_SHIFT;
196         val |= 1 << DB_EQ_EVNT_SHIFT;
197         val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
198         iowrite32(val, adapter->db + DB_EQ_OFFSET);
199 }
200
201 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
202 {
203         u32 val = 0;
204         val |= qid & DB_CQ_RING_ID_MASK;
205         val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
206                         DB_CQ_RING_ID_EXT_MASK_SHIFT);
207
208         if (adapter->eeh_err)
209                 return;
210
211         if (arm)
212                 val |= 1 << DB_CQ_REARM_SHIFT;
213         val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
214         iowrite32(val, adapter->db + DB_CQ_OFFSET);
215 }
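
/* Illustrative usage of the doorbell helpers above (example values, not
 * taken from this file): after posting 64 RX descriptors the RX queue is
 * kicked with be_rxq_notify(adapter, rxq->id, 64), and after reaping N
 * completions from a CQ it is re-armed with
 * be_cq_notify(adapter, cq->id, true, N). Note that only the RQ/TXQ
 * helpers issue a wmb(), so posted descriptors are visible to hardware
 * before the doorbell write.
 */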
216
217 static int be_mac_addr_set(struct net_device *netdev, void *p)
218 {
219         struct be_adapter *adapter = netdev_priv(netdev);
220         struct sockaddr *addr = p;
221         int status = 0;
222
223         if (!is_valid_ether_addr(addr->sa_data))
224                 return -EADDRNOTAVAIL;
225
226         /* MAC addr configuration will be done in hardware for VFs
227          * by their corresponding PFs. Just copy to netdev addr here
228          */
229         if (!be_physfn(adapter))
230                 goto netdev_addr;
231
232         status = be_cmd_pmac_del(adapter, adapter->if_handle,
233                                 adapter->pmac_id, 0);
234         if (status)
235                 return status;
236
237         status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
238                                 adapter->if_handle, &adapter->pmac_id, 0);
239 netdev_addr:
240         if (!status)
241                 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
242
243         return status;
244 }
245
246 static void populate_be2_stats(struct be_adapter *adapter)
247 {
248
249         struct be_drv_stats *drvs = &adapter->drv_stats;
250         struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
251         struct be_port_rxf_stats_v0 *port_stats =
252                 be_port_rxf_stats_from_cmd(adapter);
253         struct be_rxf_stats_v0 *rxf_stats =
254                 be_rxf_stats_from_cmd(adapter);
255
256         drvs->rx_pause_frames = port_stats->rx_pause_frames;
257         drvs->rx_crc_errors = port_stats->rx_crc_errors;
258         drvs->rx_control_frames = port_stats->rx_control_frames;
259         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
260         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
261         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
262         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
263         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
264         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
265         drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
266         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
267         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
268         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
269         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
270         drvs->rx_input_fifo_overflow_drop =
271                 port_stats->rx_input_fifo_overflow;
272         drvs->rx_dropped_header_too_small =
273                 port_stats->rx_dropped_header_too_small;
274         drvs->rx_address_match_errors =
275                 port_stats->rx_address_match_errors;
276         drvs->rx_alignment_symbol_errors =
277                 port_stats->rx_alignment_symbol_errors;
278
279         drvs->tx_pauseframes = port_stats->tx_pauseframes;
280         drvs->tx_controlframes = port_stats->tx_controlframes;
281
282         if (adapter->port_num)
283                 drvs->jabber_events =
284                         rxf_stats->port1_jabber_events;
285         else
286                 drvs->jabber_events =
287                         rxf_stats->port0_jabber_events;
288         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
289         drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
290         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
291         drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
292         drvs->forwarded_packets = rxf_stats->forwarded_packets;
293         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
294         drvs->rx_drops_no_tpre_descr =
295                 rxf_stats->rx_drops_no_tpre_descr;
296         drvs->rx_drops_too_many_frags =
297                 rxf_stats->rx_drops_too_many_frags;
298         drvs->eth_red_drops = pmem_sts->eth_red_drops;
299 }
300
301 static void populate_be3_stats(struct be_adapter *adapter)
302 {
303         struct be_drv_stats *drvs = &adapter->drv_stats;
304         struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
305
306         struct be_rxf_stats_v1 *rxf_stats =
307                 be_rxf_stats_from_cmd(adapter);
308         struct be_port_rxf_stats_v1 *port_stats =
309                 be_port_rxf_stats_from_cmd(adapter);
310
311         drvs->rx_priority_pause_frames = 0;
312         drvs->pmem_fifo_overflow_drop = 0;
313         drvs->rx_pause_frames = port_stats->rx_pause_frames;
314         drvs->rx_crc_errors = port_stats->rx_crc_errors;
315         drvs->rx_control_frames = port_stats->rx_control_frames;
316         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
317         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
318         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
319         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
320         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
321         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
322         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
323         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
324         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
325         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
326         drvs->rx_dropped_header_too_small =
327                 port_stats->rx_dropped_header_too_small;
328         drvs->rx_input_fifo_overflow_drop =
329                 port_stats->rx_input_fifo_overflow_drop;
330         drvs->rx_address_match_errors =
331                 port_stats->rx_address_match_errors;
332         drvs->rx_alignment_symbol_errors =
333                 port_stats->rx_alignment_symbol_errors;
334         drvs->rxpp_fifo_overflow_drop =
335                 port_stats->rxpp_fifo_overflow_drop;
336         drvs->tx_pauseframes = port_stats->tx_pauseframes;
337         drvs->tx_controlframes = port_stats->tx_controlframes;
338         drvs->jabber_events = port_stats->jabber_events;
339         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
340         drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
341         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
342         drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
343         drvs->forwarded_packets = rxf_stats->forwarded_packets;
344         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
345         drvs->rx_drops_no_tpre_descr =
346                 rxf_stats->rx_drops_no_tpre_descr;
347         drvs->rx_drops_too_many_frags =
348                 rxf_stats->rx_drops_too_many_frags;
349         drvs->eth_red_drops = pmem_sts->eth_red_drops;
350 }
351
352 static void populate_lancer_stats(struct be_adapter *adapter)
353 {
354
355         struct be_drv_stats *drvs = &adapter->drv_stats;
356         struct lancer_cmd_pport_stats *pport_stats =
357                                                 pport_stats_from_cmd(adapter);
358         drvs->rx_priority_pause_frames = 0;
359         drvs->pmem_fifo_overflow_drop = 0;
360         drvs->rx_pause_frames =
361                 make_64bit_val(pport_stats->rx_pause_frames_hi,
362                                  pport_stats->rx_pause_frames_lo);
363         drvs->rx_crc_errors = make_64bit_val(pport_stats->rx_crc_errors_hi,
364                                                 pport_stats->rx_crc_errors_lo);
365         drvs->rx_control_frames =
366                         make_64bit_val(pport_stats->rx_control_frames_hi,
367                         pport_stats->rx_control_frames_lo);
368         drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
369         drvs->rx_frame_too_long =
370                 make_64bit_val(pport_stats->rx_frames_too_long_hi,
371                                         pport_stats->rx_frames_too_long_lo);
372         drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
373         drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
374         drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
375         drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
376         drvs->rx_dropped_tcp_length =
377                                 pport_stats->rx_dropped_invalid_tcp_length;
378         drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
379         drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
380         drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
381         drvs->rx_dropped_header_too_small =
382                                 pport_stats->rx_dropped_header_too_small;
383         drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
384         drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
385         drvs->rx_alignment_symbol_errors =
386                 make_64bit_val(pport_stats->rx_symbol_errors_hi,
387                                 pport_stats->rx_symbol_errors_lo);
388         drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
389         drvs->tx_pauseframes = make_64bit_val(pport_stats->tx_pause_frames_hi,
390                                         pport_stats->tx_pause_frames_lo);
391         drvs->tx_controlframes =
392                 make_64bit_val(pport_stats->tx_control_frames_hi,
393                                 pport_stats->tx_control_frames_lo);
394         drvs->jabber_events = pport_stats->rx_jabbers;
395         drvs->rx_drops_no_pbuf = 0;
396         drvs->rx_drops_no_txpb = 0;
397         drvs->rx_drops_no_erx_descr = 0;
398         drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
399         drvs->forwarded_packets = make_64bit_val(pport_stats->num_forwards_hi,
400                                                 pport_stats->num_forwards_lo);
401         drvs->rx_drops_mtu = make_64bit_val(pport_stats->rx_drops_mtu_hi,
402                                                 pport_stats->rx_drops_mtu_lo);
403         drvs->rx_drops_no_tpre_descr = 0;
404         drvs->rx_drops_too_many_frags =
405                 make_64bit_val(pport_stats->rx_drops_too_many_frags_hi,
406                                 pport_stats->rx_drops_too_many_frags_lo);
407 }
408
409 void be_parse_stats(struct be_adapter *adapter)
410 {
411         if (adapter->generation == BE_GEN3) {
412                 if (lancer_chip(adapter))
413                         populate_lancer_stats(adapter);
414                 else
415                         populate_be3_stats(adapter);
416         } else {
417                 populate_be2_stats(adapter);
418         }
419 }
420
421 void netdev_stats_update(struct be_adapter *adapter)
422 {
423         struct be_drv_stats *drvs = &adapter->drv_stats;
424         struct net_device_stats *dev_stats = &adapter->netdev->stats;
425         struct be_rx_obj *rxo;
426         struct be_tx_obj *txo;
427         unsigned long pkts = 0, bytes = 0, mcast = 0, drops = 0;
428         int i;
429
430         for_all_rx_queues(adapter, rxo, i) {
431                 pkts += rx_stats(rxo)->rx_pkts;
432                 bytes += rx_stats(rxo)->rx_bytes;
433                 mcast += rx_stats(rxo)->rx_mcast_pkts;
434                 drops += rx_stats(rxo)->rx_dropped;
435                 /* no space in Linux buffers: best possible approximation */
436                 if (adapter->generation == BE_GEN3) {
437                         if (!(lancer_chip(adapter))) {
438                                 struct be_erx_stats_v1 *erx =
439                                         be_erx_stats_from_cmd(adapter);
440                                 drops += erx->rx_drops_no_fragments[rxo->q.id];
441                         }
442                 } else {
443                         struct be_erx_stats_v0 *erx =
444                                         be_erx_stats_from_cmd(adapter);
445                         drops += erx->rx_drops_no_fragments[rxo->q.id];
446                 }
447         }
448         dev_stats->rx_packets = pkts;
449         dev_stats->rx_bytes = bytes;
450         dev_stats->multicast = mcast;
451         dev_stats->rx_dropped = drops;
452
453         pkts = bytes = 0;
454         for_all_tx_queues(adapter, txo, i) {
455                 pkts += tx_stats(txo)->be_tx_pkts;
456                 bytes += tx_stats(txo)->be_tx_bytes;
457         }
458         dev_stats->tx_packets = pkts;
459         dev_stats->tx_bytes = bytes;
460
461         /* bad pkts received */
462         dev_stats->rx_errors = drvs->rx_crc_errors +
463                 drvs->rx_alignment_symbol_errors +
464                 drvs->rx_in_range_errors +
465                 drvs->rx_out_range_errors +
466                 drvs->rx_frame_too_long +
467                 drvs->rx_dropped_too_small +
468                 drvs->rx_dropped_too_short +
469                 drvs->rx_dropped_header_too_small +
470                 drvs->rx_dropped_tcp_length +
471                 drvs->rx_dropped_runt +
472                 drvs->rx_tcp_checksum_errs +
473                 drvs->rx_ip_checksum_errs +
474                 drvs->rx_udp_checksum_errs;
475
476         /* detailed rx errors */
477         dev_stats->rx_length_errors = drvs->rx_in_range_errors +
478                 drvs->rx_out_range_errors +
479                 drvs->rx_frame_too_long;
480
481         dev_stats->rx_crc_errors = drvs->rx_crc_errors;
482
483         /* frame alignment errors */
484         dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
485
486         /* receiver fifo overrun */
487         /* drops_no_pbuf is not per i/f, it's per BE card */
488         dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
489                                 drvs->rx_input_fifo_overflow_drop +
490                                 drvs->rx_drops_no_pbuf;
491 }
492
493 void be_link_status_update(struct be_adapter *adapter, bool link_up)
494 {
495         struct net_device *netdev = adapter->netdev;
496
497         /* If link came up or went down */
498         if (adapter->link_up != link_up) {
499                 adapter->link_speed = -1;
500                 if (link_up) {
501                         netif_carrier_on(netdev);
502                         printk(KERN_INFO "%s: Link up\n", netdev->name);
503                 } else {
504                         netif_carrier_off(netdev);
505                         printk(KERN_INFO "%s: Link down\n", netdev->name);
506                 }
507                 adapter->link_up = link_up;
508         }
509 }
510
511 /* Update the EQ delay in BE based on the RX frags consumed / sec */
512 static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
513 {
514         struct be_eq_obj *rx_eq = &rxo->rx_eq;
515         struct be_rx_stats *stats = &rxo->stats;
516         ulong now = jiffies;
517         u32 eqd;
518
519         if (!rx_eq->enable_aic)
520                 return;
521
522         /* Wrapped around */
523         if (time_before(now, stats->rx_fps_jiffies)) {
524                 stats->rx_fps_jiffies = now;
525                 return;
526         }
527
528         /* Update once a second */
529         if ((now - stats->rx_fps_jiffies) < HZ)
530                 return;
531
532         stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
533                         ((now - stats->rx_fps_jiffies) / HZ);
534
535         stats->rx_fps_jiffies = now;
536         stats->prev_rx_frags = stats->rx_frags;
537         eqd = stats->rx_fps / 110000;
538         eqd = eqd << 3;
539         if (eqd > rx_eq->max_eqd)
540                 eqd = rx_eq->max_eqd;
541         if (eqd < rx_eq->min_eqd)
542                 eqd = rx_eq->min_eqd;
543         if (eqd < 10)
544                 eqd = 0;
545         if (eqd != rx_eq->cur_eqd)
546                 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
547
548         rx_eq->cur_eqd = eqd;
549 }
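
/* Worked example (illustrative): at 440,000 RX frags/sec,
 * eqd = (440000 / 110000) << 3 = 32. The value is clamped to
 * [min_eqd, max_eqd], and anything below 10 is forced to 0 (no delay),
 * so low-rate traffic keeps interrupt latency minimal.
 */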
550
551 static u32 be_calc_rate(u64 bytes, unsigned long ticks)
552 {
553         u64 rate = bytes;
554
555         do_div(rate, ticks / HZ);
556         rate <<= 3;                     /* bytes/sec -> bits/sec */
557         do_div(rate, 1000000ul);        /* Mbits/sec */
558
559         return rate;
560 }
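
/* Worked example (illustrative): 250,000,000 bytes over 2 * HZ ticks
 * (two seconds) is 125,000,000 bytes/sec; << 3 gives 1,000,000,000
 * bits/sec, and dividing by 1,000,000 yields 1000 Mbits/sec.
 */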
561
562 static void be_tx_rate_update(struct be_tx_obj *txo)
563 {
564         struct be_tx_stats *stats = tx_stats(txo);
565         ulong now = jiffies;
566
567         /* Wrapped around? */
568         if (time_before(now, stats->be_tx_jiffies)) {
569                 stats->be_tx_jiffies = now;
570                 return;
571         }
572
573         /* Update tx rate once in two seconds */
574         if ((now - stats->be_tx_jiffies) > 2 * HZ) {
575                 stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
576                                                   - stats->be_tx_bytes_prev,
577                                                  now - stats->be_tx_jiffies);
578                 stats->be_tx_jiffies = now;
579                 stats->be_tx_bytes_prev = stats->be_tx_bytes;
580         }
581 }
582
583 static void be_tx_stats_update(struct be_tx_obj *txo,
584                         u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
585 {
586         struct be_tx_stats *stats = tx_stats(txo);
587
588         stats->be_tx_reqs++;
589         stats->be_tx_wrbs += wrb_cnt;
590         stats->be_tx_bytes += copied;
591         stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
592         if (stopped)
593                 stats->be_tx_stops++;
594 }
595
596 /* Determine number of WRB entries needed to xmit data in an skb */
597 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
598                                                                 bool *dummy)
599 {
600         int cnt = (skb->len > skb->data_len);
601
602         cnt += skb_shinfo(skb)->nr_frags;
603
604         /* to account for hdr wrb */
605         cnt++;
606         if (lancer_chip(adapter) || !(cnt & 1)) {
607                 *dummy = false;
608         } else {
609                 /* add a dummy to make it an even num */
610                 cnt++;
611                 *dummy = true;
612         }
613         BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
614         return cnt;
615 }
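
/* Example (illustrative): an skb with linear data and two page frags
 * needs 1 + 2 data WRBs plus the header WRB = 4, which is even, so no
 * dummy is added. With a single page frag the count would be 3, and a
 * dummy WRB pads it to an even 4 on BE2/BE3; Lancer needs no padding.
 */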
616
617 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
618 {
619         wrb->frag_pa_hi = upper_32_bits(addr);
620         wrb->frag_pa_lo = addr & 0xFFFFFFFF;
621         wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
622 }
623
624 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
625                 struct sk_buff *skb, u32 wrb_cnt, u32 len)
626 {
627         u8 vlan_prio = 0;
628         u16 vlan_tag = 0;
629
630         memset(hdr, 0, sizeof(*hdr));
631
632         AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
633
634         if (skb_is_gso(skb)) {
635                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
636                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
637                         hdr, skb_shinfo(skb)->gso_size);
638                 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
639                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
640                 if (lancer_chip(adapter) && adapter->sli_family  ==
641                                                         LANCER_A0_SLI_FAMILY) {
642                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
643                         if (is_tcp_pkt(skb))
644                                 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
645                                                                 tcpcs, hdr, 1);
646                         else if (is_udp_pkt(skb))
647                                 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
648                                                                 udpcs, hdr, 1);
649                 }
650         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
651                 if (is_tcp_pkt(skb))
652                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
653                 else if (is_udp_pkt(skb))
654                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
655         }
656
657         if (vlan_tx_tag_present(skb)) {
658                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
659                 vlan_tag = vlan_tx_tag_get(skb);
660                 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
661                 /* If vlan priority provided by OS is NOT in available bmap */
662                 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
663                         vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
664                                         adapter->recommended_prio;
665                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
666         }
667
668         AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
669         AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
670         AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
671         AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
672 }
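
/* Example (illustrative, using VLAN_PRIO_MASK 0xe000 and
 * VLAN_PRIO_SHIFT 13 from if_vlan.h): a tag of 0x2005 carries priority
 * (0x2005 & 0xe000) >> 13 = 1 and VID 5. If bit 1 is clear in
 * adapter->vlan_prio_bmap, the priority bits are rewritten with
 * adapter->recommended_prio before the tag is placed in the header WRB.
 */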
673
674 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
675                 bool unmap_single)
676 {
677         dma_addr_t dma;
678
679         be_dws_le_to_cpu(wrb, sizeof(*wrb));
680
681         dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
682         if (wrb->frag_len) {
683                 if (unmap_single)
684                         dma_unmap_single(dev, dma, wrb->frag_len,
685                                          DMA_TO_DEVICE);
686                 else
687                         dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
688         }
689 }
690
691 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
692                 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
693 {
694         dma_addr_t busaddr;
695         int i, copied = 0;
696         struct device *dev = &adapter->pdev->dev;
697         struct sk_buff *first_skb = skb;
698         struct be_eth_wrb *wrb;
699         struct be_eth_hdr_wrb *hdr;
700         bool map_single = false;
701         u16 map_head;
702
703         hdr = queue_head_node(txq);
704         queue_head_inc(txq);
705         map_head = txq->head;
706
707         if (skb->len > skb->data_len) {
708                 int len = skb_headlen(skb);
709                 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
710                 if (dma_mapping_error(dev, busaddr))
711                         goto dma_err;
712                 map_single = true;
713                 wrb = queue_head_node(txq);
714                 wrb_fill(wrb, busaddr, len);
715                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
716                 queue_head_inc(txq);
717                 copied += len;
718         }
719
720         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
721                 struct skb_frag_struct *frag =
722                         &skb_shinfo(skb)->frags[i];
723                 busaddr = dma_map_page(dev, frag->page, frag->page_offset,
724                                        frag->size, DMA_TO_DEVICE);
725                 if (dma_mapping_error(dev, busaddr))
726                         goto dma_err;
727                 wrb = queue_head_node(txq);
728                 wrb_fill(wrb, busaddr, frag->size);
729                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
730                 queue_head_inc(txq);
731                 copied += frag->size;
732         }
733
734         if (dummy_wrb) {
735                 wrb = queue_head_node(txq);
736                 wrb_fill(wrb, 0, 0);
737                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
738                 queue_head_inc(txq);
739         }
740
741         wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
742         be_dws_cpu_to_le(hdr, sizeof(*hdr));
743
744         return copied;
745 dma_err:
746         txq->head = map_head;
747         while (copied) {
748                 wrb = queue_head_node(txq);
749                 unmap_tx_frag(dev, wrb, map_single);
750                 map_single = false;
751                 copied -= wrb->frag_len;
752                 queue_head_inc(txq);
753         }
754         return 0;
755 }
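
/* Unwind detail: on a DMA mapping failure the queue head is rewound to
 * map_head (just past the header WRB) and each WRB filled so far is
 * unmapped in order. Only the first data WRB can hold a dma_map_single()
 * mapping (the skb linear part), which is why map_single is cleared
 * after the first pass of the loop.
 */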
756
757 static netdev_tx_t be_xmit(struct sk_buff *skb,
758                         struct net_device *netdev)
759 {
760         struct be_adapter *adapter = netdev_priv(netdev);
761         struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
762         struct be_queue_info *txq = &txo->q;
763         u32 wrb_cnt = 0, copied = 0;
764         u32 start = txq->head;
765         bool dummy_wrb, stopped = false;
766
767         wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
768
769         copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
770         if (copied) {
771                 /* record the sent skb in the sent_skb table */
772                 BUG_ON(txo->sent_skb_list[start]);
773                 txo->sent_skb_list[start] = skb;
774
775                 /* Ensure txq has space for the next skb; Else stop the queue
776                  * *BEFORE* ringing the tx doorbell, so that we serialize the
777                  * tx compls of the current transmit which'll wake up the queue
778                  */
779                 atomic_add(wrb_cnt, &txq->used);
780                 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
781                                                                 txq->len) {
782                         netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
783                         stopped = true;
784                 }
785
786                 be_txq_notify(adapter, txq->id, wrb_cnt);
787
788                 be_tx_stats_update(txo, wrb_cnt, copied,
789                                 skb_shinfo(skb)->gso_segs, stopped);
790         } else {
791                 txq->head = start;
792                 dev_kfree_skb_any(skb);
793         }
794         return NETDEV_TX_OK;
795 }
796
797 static int be_change_mtu(struct net_device *netdev, int new_mtu)
798 {
799         struct be_adapter *adapter = netdev_priv(netdev);
800         if (new_mtu < BE_MIN_MTU ||
801                         new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
802                                         (ETH_HLEN + ETH_FCS_LEN))) {
803                 dev_info(&adapter->pdev->dev,
804                         "MTU must be between %d and %d bytes\n",
805                         BE_MIN_MTU,
806                         (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
807                 return -EINVAL;
808         }
809         dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
810                         netdev->mtu, new_mtu);
811         netdev->mtu = new_mtu;
812         return 0;
813 }
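
/* Worked example (assuming the conventional be.h values BE_MIN_MTU = 256
 * and BE_MAX_JUMBO_FRAME_SIZE = 9018): with ETH_HLEN = 14 and
 * ETH_FCS_LEN = 4, the accepted MTU range is 256..9000 bytes, i.e. up to
 * standard jumbo frames.
 */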
814
815 /*
816  * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
817  * If the user configures more, place BE in vlan promiscuous mode.
818  */
819 static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
820 {
821         u16 vtag[BE_NUM_VLANS_SUPPORTED];
822         u16 ntags = 0, i;
823         int status = 0;
824         u32 if_handle;
825
826         if (vf) {
827                 if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
828                 vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
829                 status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
830         }
831
832         if (adapter->vlans_added <= adapter->max_vlans) {
833                 /* Construct VLAN Table to give to HW */
834                 for (i = 0; i < VLAN_N_VID; i++) {
835                         if (adapter->vlan_tag[i]) {
836                                 vtag[ntags] = cpu_to_le16(i);
837                                 ntags++;
838                         }
839                 }
840                 status = be_cmd_vlan_config(adapter, adapter->if_handle,
841                                         vtag, ntags, 1, 0);
842         } else {
843                 status = be_cmd_vlan_config(adapter, adapter->if_handle,
844                                         NULL, 0, 1, 1);
845         }
846
847         return status;
848 }
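
/* Example (illustrative): with BE_NUM_VLANS_SUPPORTED = 64, configuring
 * a 65th VLAN makes vlans_added exceed max_vlans, so the else branch
 * passes a NULL table with the promiscuous flag set and the interface
 * falls back to VLAN promiscuous mode instead of exact filtering.
 */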
849
850 static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
851 {
852         struct be_adapter *adapter = netdev_priv(netdev);
853
854         adapter->vlans_added++;
855         if (!be_physfn(adapter))
856                 return;
857
858         adapter->vlan_tag[vid] = 1;
859         if (adapter->vlans_added <= (adapter->max_vlans + 1))
860                 be_vid_config(adapter, false, 0);
861 }
862
863 static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
864 {
865         struct be_adapter *adapter = netdev_priv(netdev);
866
867         adapter->vlans_added--;
868
869         if (!be_physfn(adapter))
870                 return;
871
872         adapter->vlan_tag[vid] = 0;
873         if (adapter->vlans_added <= adapter->max_vlans)
874                 be_vid_config(adapter, false, 0);
875 }
876
877 static void be_set_multicast_list(struct net_device *netdev)
878 {
879         struct be_adapter *adapter = netdev_priv(netdev);
880
881         if (netdev->flags & IFF_PROMISC) {
882                 be_cmd_promiscuous_config(adapter, true);
883                 adapter->promiscuous = true;
884                 goto done;
885         }
886
887         /* BE was previously in promiscuous mode; disable it */
888         if (adapter->promiscuous) {
889                 adapter->promiscuous = false;
890                 be_cmd_promiscuous_config(adapter, false);
891         }
892
893         /* Enable multicast promisc if num configured exceeds what we support */
894         if (netdev->flags & IFF_ALLMULTI ||
895             netdev_mc_count(netdev) > BE_MAX_MC) {
896                 be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
897                                 &adapter->mc_cmd_mem);
898                 goto done;
899         }
900
901         be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
902                 &adapter->mc_cmd_mem);
903 done:
904         return;
905 }
906
907 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
908 {
909         struct be_adapter *adapter = netdev_priv(netdev);
910         int status;
911
912         if (!adapter->sriov_enabled)
913                 return -EPERM;
914
915         if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
916                 return -EINVAL;
917
918         if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
919                 status = be_cmd_pmac_del(adapter,
920                                         adapter->vf_cfg[vf].vf_if_handle,
921                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
922
923         status = be_cmd_pmac_add(adapter, mac,
924                                 adapter->vf_cfg[vf].vf_if_handle,
925                                 &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
926
927         if (status)
928                 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
929                                 mac, vf);
930         else
931                 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
932
933         return status;
934 }
935
936 static int be_get_vf_config(struct net_device *netdev, int vf,
937                         struct ifla_vf_info *vi)
938 {
939         struct be_adapter *adapter = netdev_priv(netdev);
940
941         if (!adapter->sriov_enabled)
942                 return -EPERM;
943
944         if (vf >= num_vfs)
945                 return -EINVAL;
946
947         vi->vf = vf;
948         vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
949         vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
950         vi->qos = 0;
951         memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
952
953         return 0;
954 }
955
956 static int be_set_vf_vlan(struct net_device *netdev,
957                         int vf, u16 vlan, u8 qos)
958 {
959         struct be_adapter *adapter = netdev_priv(netdev);
960         int status = 0;
961
962         if (!adapter->sriov_enabled)
963                 return -EPERM;
964
965         if ((vf >= num_vfs) || (vlan > 4095))
966                 return -EINVAL;
967
968         if (vlan) {
969                 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
970                 adapter->vlans_added++;
971         } else {
972                 adapter->vf_cfg[vf].vf_vlan_tag = 0;
973                 adapter->vlans_added--;
974         }
975
976         status = be_vid_config(adapter, true, vf);
977
978         if (status)
979                 dev_info(&adapter->pdev->dev,
980                                 "VLAN %d config on VF %d failed\n", vlan, vf);
981         return status;
982 }
983
984 static int be_set_vf_tx_rate(struct net_device *netdev,
985                         int vf, int rate)
986 {
987         struct be_adapter *adapter = netdev_priv(netdev);
988         int status = 0;
989
990         if (!adapter->sriov_enabled)
991                 return -EPERM;
992
993         if ((vf >= num_vfs) || (rate < 0))
994                 return -EINVAL;
995
996         if (rate > 10000)
997                 rate = 10000;
998
999         adapter->vf_cfg[vf].vf_tx_rate = rate;
1000         status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
1001
1002         if (status)
1003                 dev_info(&adapter->pdev->dev,
1004                                 "tx rate %d on VF %d failed\n", rate, vf);
1005         return status;
1006 }
1007
1008 static void be_rx_rate_update(struct be_rx_obj *rxo)
1009 {
1010         struct be_rx_stats *stats = &rxo->stats;
1011         ulong now = jiffies;
1012
1013         /* Wrapped around */
1014         if (time_before(now, stats->rx_jiffies)) {
1015                 stats->rx_jiffies = now;
1016                 return;
1017         }
1018
1019         /* Update the rate once in two seconds */
1020         if ((now - stats->rx_jiffies) < 2 * HZ)
1021                 return;
1022
1023         stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
1024                                 now - stats->rx_jiffies);
1025         stats->rx_jiffies = now;
1026         stats->rx_bytes_prev = stats->rx_bytes;
1027 }
1028
1029 static void be_rx_stats_update(struct be_rx_obj *rxo,
1030                 struct be_rx_compl_info *rxcp)
1031 {
1032         struct be_rx_stats *stats = &rxo->stats;
1033
1034         stats->rx_compl++;
1035         stats->rx_frags += rxcp->num_rcvd;
1036         stats->rx_bytes += rxcp->pkt_size;
1037         stats->rx_pkts++;
1038         if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1039                 stats->rx_mcast_pkts++;
1040         if (rxcp->err)
1041                 stats->rxcp_err++;
1042 }
1043
1044 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1045 {
1046         /* L4 checksum is not reliable for non TCP/UDP packets.
1047          * Also ignore ipcksm for ipv6 pkts */
1048         return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1049                                 (rxcp->ip_csum || rxcp->ipv6);
1050 }
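
/* Example (illustrative): a TCP/IPv4 completion with both l4_csum and
 * ip_csum set passes. A packet that is neither TCP nor UDP
 * (tcpf == udpf == 0) never passes, and for IPv6 the ip_csum bit is
 * ignored since IPv6 has no header checksum to verify.
 */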
1051
1052 static struct be_rx_page_info *
1053 get_rx_page_info(struct be_adapter *adapter,
1054                 struct be_rx_obj *rxo,
1055                 u16 frag_idx)
1056 {
1057         struct be_rx_page_info *rx_page_info;
1058         struct be_queue_info *rxq = &rxo->q;
1059
1060         rx_page_info = &rxo->page_info_tbl[frag_idx];
1061         BUG_ON(!rx_page_info->page);
1062
1063         if (rx_page_info->last_page_user) {
1064                 dma_unmap_page(&adapter->pdev->dev,
1065                                dma_unmap_addr(rx_page_info, bus),
1066                                adapter->big_page_size, DMA_FROM_DEVICE);
1067                 rx_page_info->last_page_user = false;
1068         }
1069
1070         atomic_dec(&rxq->used);
1071         return rx_page_info;
1072 }
1073
1074 /* Throw away the data in the Rx completion */
1075 static void be_rx_compl_discard(struct be_adapter *adapter,
1076                 struct be_rx_obj *rxo,
1077                 struct be_rx_compl_info *rxcp)
1078 {
1079         struct be_queue_info *rxq = &rxo->q;
1080         struct be_rx_page_info *page_info;
1081         u16 i, num_rcvd = rxcp->num_rcvd;
1082
1083         for (i = 0; i < num_rcvd; i++) {
1084                 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1085                 put_page(page_info->page);
1086                 memset(page_info, 0, sizeof(*page_info));
1087                 index_inc(&rxcp->rxq_idx, rxq->len);
1088         }
1089 }
1090
1091 /*
1092  * skb_fill_rx_data forms a complete skb for an ether frame
1093  * indicated by rxcp.
1094  */
1095 static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
1096                         struct sk_buff *skb, struct be_rx_compl_info *rxcp)
1097 {
1098         struct be_queue_info *rxq = &rxo->q;
1099         struct be_rx_page_info *page_info;
1100         u16 i, j;
1101         u16 hdr_len, curr_frag_len, remaining;
1102         u8 *start;
1103
1104         page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1105         start = page_address(page_info->page) + page_info->page_offset;
1106         prefetch(start);
1107
1108         /* Copy data in the first descriptor of this completion */
1109         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1110
1111         /* Copy the header portion into skb_data */
1112         hdr_len = min(BE_HDR_LEN, curr_frag_len);
1113         memcpy(skb->data, start, hdr_len);
1114         skb->len = curr_frag_len;
1115         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1116                 /* Complete packet has now been moved to data */
1117                 put_page(page_info->page);
1118                 skb->data_len = 0;
1119                 skb->tail += curr_frag_len;
1120         } else {
1121                 skb_shinfo(skb)->nr_frags = 1;
1122                 skb_shinfo(skb)->frags[0].page = page_info->page;
1123                 skb_shinfo(skb)->frags[0].page_offset =
1124                                         page_info->page_offset + hdr_len;
1125                 skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
1126                 skb->data_len = curr_frag_len - hdr_len;
1127                 skb->tail += hdr_len;
1128         }
1129         page_info->page = NULL;
1130
1131         if (rxcp->pkt_size <= rx_frag_size) {
1132                 BUG_ON(rxcp->num_rcvd != 1);
1133                 return;
1134         }
1135
1136         /* More frags present for this completion */
1137         index_inc(&rxcp->rxq_idx, rxq->len);
1138         remaining = rxcp->pkt_size - curr_frag_len;
1139         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1140                 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1141                 curr_frag_len = min(remaining, rx_frag_size);
1142
1143                 /* Coalesce all frags from the same physical page in one slot */
1144                 if (page_info->page_offset == 0) {
1145                         /* Fresh page */
1146                         j++;
1147                         skb_shinfo(skb)->frags[j].page = page_info->page;
1148                         skb_shinfo(skb)->frags[j].page_offset =
1149                                                         page_info->page_offset;
1150                         skb_shinfo(skb)->frags[j].size = 0;
1151                         skb_shinfo(skb)->nr_frags++;
1152                 } else {
1153                         put_page(page_info->page);
1154                 }
1155
1156                 skb_shinfo(skb)->frags[j].size += curr_frag_len;
1157                 skb->len += curr_frag_len;
1158                 skb->data_len += curr_frag_len;
1159
1160                 remaining -= curr_frag_len;
1161                 index_inc(&rxcp->rxq_idx, rxq->len);
1162                 page_info->page = NULL;
1163         }
1164         BUG_ON(j > MAX_SKB_FRAGS);
1165 }
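
/* Worked example (illustrative, using the default rx_frag_size of 2048
 * and assuming BE_HDR_LEN is 64): a 3000-byte frame arrives in two RX
 * frags. The first 64 bytes are copied into the skb linear area and the
 * rest of frag 0 (2048 - 64 = 1984 bytes) becomes page frag 0; the
 * remaining 3000 - 2048 = 952 bytes are coalesced into the same page
 * frag when they come from the same physical page, while a fresh page
 * starts a new frag slot.
 */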
1166
1167 /* Process the RX completion indicated by rxcp when GRO is disabled */
1168 static void be_rx_compl_process(struct be_adapter *adapter,
1169                         struct be_rx_obj *rxo,
1170                         struct be_rx_compl_info *rxcp)
1171 {
1172         struct net_device *netdev = adapter->netdev;
1173         struct sk_buff *skb;
1174
1175         skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
1176         if (unlikely(!skb)) {
1177                 rxo->stats.rx_dropped++;
1178                 be_rx_compl_discard(adapter, rxo, rxcp);
1179                 return;
1180         }
1181
1182         skb_fill_rx_data(adapter, rxo, skb, rxcp);
1183
1184         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1185                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1186         else
1187                 skb_checksum_none_assert(skb);
1188
1189         skb->truesize = skb->len + sizeof(struct sk_buff);
1190         skb->protocol = eth_type_trans(skb, netdev);
1191         if (adapter->netdev->features & NETIF_F_RXHASH)
1192                 skb->rxhash = rxcp->rss_hash;
1193
1194
1195         if (unlikely(rxcp->vlanf))
1196                 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1197
1198         netif_receive_skb(skb);
1199 }
1200
1201 /* Process the RX completion indicated by rxcp when GRO is enabled */
1202 static void be_rx_compl_process_gro(struct be_adapter *adapter,
1203                 struct be_rx_obj *rxo,
1204                 struct be_rx_compl_info *rxcp)
1205 {
1206         struct be_rx_page_info *page_info;
1207         struct sk_buff *skb = NULL;
1208         struct be_queue_info *rxq = &rxo->q;
1209         struct be_eq_obj *eq_obj =  &rxo->rx_eq;
1210         u16 remaining, curr_frag_len;
1211         u16 i, j;
1212
1213         skb = napi_get_frags(&eq_obj->napi);
1214         if (!skb) {
1215                 be_rx_compl_discard(adapter, rxo, rxcp);
1216                 return;
1217         }
1218
1219         remaining = rxcp->pkt_size;
1220         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1221                 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1222
1223                 curr_frag_len = min(remaining, rx_frag_size);
1224
1225                 /* Coalesce all frags from the same physical page in one slot */
1226                 if (i == 0 || page_info->page_offset == 0) {
1227                         /* First frag or Fresh page */
1228                         j++;
1229                         skb_shinfo(skb)->frags[j].page = page_info->page;
1230                         skb_shinfo(skb)->frags[j].page_offset =
1231                                                         page_info->page_offset;
1232                         skb_shinfo(skb)->frags[j].size = 0;
1233                 } else {
1234                         put_page(page_info->page);
1235                 }
1236                 skb_shinfo(skb)->frags[j].size += curr_frag_len;
1237
1238                 remaining -= curr_frag_len;
1239                 index_inc(&rxcp->rxq_idx, rxq->len);
1240                 memset(page_info, 0, sizeof(*page_info));
1241         }
1242         BUG_ON(j > MAX_SKB_FRAGS);
1243
1244         skb_shinfo(skb)->nr_frags = j + 1;
1245         skb->len = rxcp->pkt_size;
1246         skb->data_len = rxcp->pkt_size;
1247         skb->truesize += rxcp->pkt_size;
1248         skb->ip_summed = CHECKSUM_UNNECESSARY;
1249         if (adapter->netdev->features & NETIF_F_RXHASH)
1250                 skb->rxhash = rxcp->rss_hash;
1251
1252         if (unlikely(rxcp->vlanf))
1253                 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1254
1255         napi_gro_frags(&eq_obj->napi);
1256 }
1257
1258 static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1259                                 struct be_eth_rx_compl *compl,
1260                                 struct be_rx_compl_info *rxcp)
1261 {
1262         rxcp->pkt_size =
1263                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1264         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1265         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1266         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1267         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1268         rxcp->ip_csum =
1269                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1270         rxcp->l4_csum =
1271                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1272         rxcp->ipv6 =
1273                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1274         rxcp->rxq_idx =
1275                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1276         rxcp->num_rcvd =
1277                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1278         rxcp->pkt_type =
1279                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1280         rxcp->rss_hash =
1281                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1282         if (rxcp->vlanf) {
1283                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1284                                           compl);
1285                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1286                                                compl);
1287         }
1288 }
1289
1290 static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1291                                 struct be_eth_rx_compl *compl,
1292                                 struct be_rx_compl_info *rxcp)
1293 {
1294         rxcp->pkt_size =
1295                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1296         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1297         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1298         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1299         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1300         rxcp->ip_csum =
1301                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1302         rxcp->l4_csum =
1303                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1304         rxcp->ipv6 =
1305                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1306         rxcp->rxq_idx =
1307                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1308         rxcp->num_rcvd =
1309                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1310         rxcp->pkt_type =
1311                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1312         rxcp->rss_hash =
1313                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1314         if (rxcp->vlanf) {
1315                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1316                                           compl);
1317                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1318                                                compl);
1319         }
1320 }
1321
1322 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1323 {
1324         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1325         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1326         struct be_adapter *adapter = rxo->adapter;
1327
1328         /* For checking the valid bit it is OK to use either definition, as
1329          * the valid bit is at the same position in both v0 and v1 Rx compls */
1330         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1331                 return NULL;
1332
1333         rmb();
1334         be_dws_le_to_cpu(compl, sizeof(*compl));
1335
1336         if (adapter->be3_native)
1337                 be_parse_rx_compl_v1(adapter, compl, rxcp);
1338         else
1339                 be_parse_rx_compl_v0(adapter, compl, rxcp);
1340
1341         if (rxcp->vlanf) {
1342                 /* vlanf could be wrongly set in some cards.
1343                  * ignore if vtm is not set */
1344                 if ((adapter->function_mode & 0x400) && !rxcp->vtm)
1345                         rxcp->vlanf = 0;
1346
1347                 if (!lancer_chip(adapter))
1348                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1349
1350                 if (((adapter->pvid & VLAN_VID_MASK) ==
1351                      (rxcp->vlan_tag & VLAN_VID_MASK)) &&
1352                     !adapter->vlan_tag[rxcp->vlan_tag])
1353                         rxcp->vlanf = 0;
1354         }
1355
1356         /* As the compl has been parsed, reset it; we won't touch it again */
1357         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1358
1359         queue_tail_inc(&rxo->cq);
1360         return rxcp;
1361 }
1362
1363 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1364 {
1365         u32 order = get_order(size);
1366
1367         if (order > 0)
1368                 gfp |= __GFP_COMP;
1369         return  alloc_pages(gfp, order);
1370 }
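
/* Example (illustrative): with 4 KB pages, a 16 KB request has
 * get_order(16384) = 2, so four contiguous pages are allocated as a
 * compound page (__GFP_COMP); a request of one page or less is order 0
 * and needs no compound flag.
 */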
1371
1372 /*
1373  * Allocate a page, split it to fragments of size rx_frag_size and post as
1374  * receive buffers to BE
1375  */
1376 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1377 {
1378         struct be_adapter *adapter = rxo->adapter;
1379         struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1380         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1381         struct be_queue_info *rxq = &rxo->q;
1382         struct page *pagep = NULL;
1383         struct be_eth_rx_d *rxd;
1384         u64 page_dmaaddr = 0, frag_dmaaddr;
1385         u32 posted, page_offset = 0;
1386
1387         page_info = &rxo->page_info_tbl[rxq->head];
1388         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1389                 if (!pagep) {
1390                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1391                         if (unlikely(!pagep)) {
1392                                 rxo->stats.rx_post_fail++;
1393                                 break;
1394                         }
1395                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1396                                                     0, adapter->big_page_size,
1397                                                     DMA_FROM_DEVICE);
                             /* Bail out cleanly if the mapping failed */
                             if (dma_mapping_error(&adapter->pdev->dev,
                                                   page_dmaaddr)) {
                                     put_page(pagep);
                                     pagep = NULL;
                                     rxo->stats.rx_post_fail++;
                                     break;
                             }
1398                         page_info->page_offset = 0;
1399                 } else {
1400                         get_page(pagep);
1401                         page_info->page_offset = page_offset + rx_frag_size;
1402                 }
1403                 page_offset = page_info->page_offset;
1404                 page_info->page = pagep;
1405                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1406                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1407
1408                 rxd = queue_head_node(rxq);
1409                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1410                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1411
1412                 /* Any space left in the current big page for another frag? */
1413                 if ((page_offset + rx_frag_size + rx_frag_size) >
1414                                         adapter->big_page_size) {
1415                         pagep = NULL;
1416                         page_info->last_page_user = true;
1417                 }
1418
1419                 prev_page_info = page_info;
1420                 queue_head_inc(rxq);
1421                 page_info = &page_info_tbl[rxq->head];
1422         }
1423         if (pagep)
1424                 prev_page_info->last_page_user = true;
1425
1426         if (posted) {
1427                 atomic_add(posted, &rxq->used);
1428                 be_rxq_notify(adapter, rxq->id, posted);
1429         } else if (atomic_read(&rxq->used) == 0) {
1430                 /* Let be_worker replenish when memory is available */
1431                 rxo->rx_post_starved = true;
1432         }
1433 }
1434
1435 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1436 {
1437         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1438
1439         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1440                 return NULL;
1441
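             /* Don't read the rest of the compl before its valid bit */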
1442         rmb();
1443         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1444
1445         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1446
1447         queue_tail_inc(tx_cq);
1448         return txcp;
1449 }
1450
1451 static u16 be_tx_compl_process(struct be_adapter *adapter,
1452                 struct be_tx_obj *txo, u16 last_index)
1453 {
1454         struct be_queue_info *txq = &txo->q;
1455         struct be_eth_wrb *wrb;
1456         struct sk_buff **sent_skbs = txo->sent_skb_list;
1457         struct sk_buff *sent_skb;
1458         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1459         bool unmap_skb_hdr = true;
1460
1461         sent_skb = sent_skbs[txq->tail];
1462         BUG_ON(!sent_skb);
1463         sent_skbs[txq->tail] = NULL;
1464
1465         /* skip header wrb */
1466         queue_tail_inc(txq);
1467
1468         do {
1469                 cur_index = txq->tail;
1470                 wrb = queue_tail_node(txq);
1471                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1472                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1473                 unmap_skb_hdr = false;
1474
1475                 num_wrbs++;
1476                 queue_tail_inc(txq);
1477         } while (cur_index != last_index);
1478
1479         kfree_skb(sent_skb);
1480         return num_wrbs;
1481 }
1482
1483 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1484 {
1485         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1486
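             /* A zero event word means hw has not written this entry yet */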
1487         if (!eqe->evt)
1488                 return NULL;
1489
1490         rmb();
1491         eqe->evt = le32_to_cpu(eqe->evt);
1492         queue_tail_inc(&eq_obj->q);
1493         return eqe;
1494 }
1495
1496 static int event_handle(struct be_adapter *adapter,
1497                         struct be_eq_obj *eq_obj,
1498                         bool rearm)
1499 {
1500         struct be_eq_entry *eqe;
1501         u16 num = 0;
1502
1503         while ((eqe = event_get(eq_obj)) != NULL) {
1504                 eqe->evt = 0;
1505                 num++;
1506         }
1507
1508         /* Deal with any spurious interrupts that come
1509          * without events
1510          */
1511         if (!num)
1512                 rearm = true;
1513
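             /* Ack the popped entries and re-arm the EQ if requested */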
1514         be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
1515         if (num)
1516                 napi_schedule(&eq_obj->napi);
1517
1518         return num;
1519 }
1520
1521 /* Just read and notify events without processing them.
1522  * Used at the time of destroying event queues */
1523 static void be_eq_clean(struct be_adapter *adapter,
1524                         struct be_eq_obj *eq_obj)
1525 {
1526         struct be_eq_entry *eqe;
1527         u16 num = 0;
1528
1529         while ((eqe = event_get(eq_obj)) != NULL) {
1530                 eqe->evt = 0;
1531                 num++;
1532         }
1533
1534         if (num)
1535                 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1536 }
1537
1538 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1539 {
1540         struct be_rx_page_info *page_info;
1541         struct be_queue_info *rxq = &rxo->q;
1542         struct be_queue_info *rx_cq = &rxo->cq;
1543         struct be_rx_compl_info *rxcp;
1544         u16 tail;
1545
1546         /* First cleanup pending rx completions */
1547         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1548                 be_rx_compl_discard(adapter, rxo, rxcp);
1549                 be_cq_notify(adapter, rx_cq->id, false, 1);
1550         }
1551
1552         /* Then free posted rx buffers that were not used */
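             /* Start freeing at the oldest posted buffer: head minus the
              * buffers still outstanding, modulo the ring size */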
1553         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1554         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1555                 page_info = get_rx_page_info(adapter, rxo, tail);
1556                 put_page(page_info->page);
1557                 memset(page_info, 0, sizeof(*page_info));
1558         }
1559         BUG_ON(atomic_read(&rxq->used));
1560         rxq->tail = rxq->head = 0;
1561 }
1562
1563 static void be_tx_compl_clean(struct be_adapter *adapter,
1564                                 struct be_tx_obj *txo)
1565 {
1566         struct be_queue_info *tx_cq = &txo->cq;
1567         struct be_queue_info *txq = &txo->q;
1568         struct be_eth_tx_compl *txcp;
1569         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1570         struct sk_buff **sent_skbs = txo->sent_skb_list;
1571         struct sk_buff *sent_skb;
1572         bool dummy_wrb;
1573
1574         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1575         do {
1576                 while ((txcp = be_tx_compl_get(tx_cq))) {
1577                         end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1578                                         wrb_index, txcp);
1579                         num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
1580                         cmpl++;
1581                 }
1582                 if (cmpl) {
1583                         be_cq_notify(adapter, tx_cq->id, false, cmpl);
1584                         atomic_sub(num_wrbs, &txq->used);
1585                         cmpl = 0;
1586                         num_wrbs = 0;
1587                 }
1588
1589                 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1590                         break;
1591
1592                 mdelay(1);
1593         } while (true);
1594
1595         if (atomic_read(&txq->used))
1596                 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1597                         atomic_read(&txq->used));
1598
1599         /* free posted tx for which compls will never arrive */
1600         while (atomic_read(&txq->used)) {
1601                 sent_skb = sent_skbs[txq->tail];
1602                 end_idx = txq->tail;
1603                 index_adv(&end_idx,
1604                         wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1605                         txq->len);
1606                 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1607                 atomic_sub(num_wrbs, &txq->used);
1608         }
1609 }
1610
1611 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1612 {
1613         struct be_queue_info *q;
1614
1615         q = &adapter->mcc_obj.q;
1616         if (q->created)
1617                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1618         be_queue_free(adapter, q);
1619
1620         q = &adapter->mcc_obj.cq;
1621         if (q->created)
1622                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1623         be_queue_free(adapter, q);
1624 }
1625
1626 /* Must be called only after TX qs are created as MCC shares TX EQ */
1627 static int be_mcc_queues_create(struct be_adapter *adapter)
1628 {
1629         struct be_queue_info *q, *cq;
1630
1631         /* Alloc MCC compl queue */
1632         cq = &adapter->mcc_obj.cq;
1633         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1634                         sizeof(struct be_mcc_compl)))
1635                 goto err;
1636
1637         /* Ask BE to create MCC compl queue; share TX's eq */
1638         if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1639                 goto mcc_cq_free;
1640
1641         /* Alloc MCC queue */
1642         q = &adapter->mcc_obj.q;
1643         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1644                 goto mcc_cq_destroy;
1645
1646         /* Ask BE to create MCC queue */
1647         if (be_cmd_mccq_create(adapter, q, cq))
1648                 goto mcc_q_free;
1649
1650         return 0;
1651
1652 mcc_q_free:
1653         be_queue_free(adapter, q);
1654 mcc_cq_destroy:
1655         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1656 mcc_cq_free:
1657         be_queue_free(adapter, cq);
1658 err:
1659         return -1;
1660 }
1661
1662 static void be_tx_queues_destroy(struct be_adapter *adapter)
1663 {
1664         struct be_queue_info *q;
1665         struct be_tx_obj *txo;
1666         u8 i;
1667
1668         for_all_tx_queues(adapter, txo, i) {
1669                 q = &txo->q;
1670                 if (q->created)
1671                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1672                 be_queue_free(adapter, q);
1673
1674                 q = &txo->cq;
1675                 if (q->created)
1676                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1677                 be_queue_free(adapter, q);
1678         }
1679
1680         /* Clear any residual events */
1681         be_eq_clean(adapter, &adapter->tx_eq);
1682
1683         q = &adapter->tx_eq.q;
1684         if (q->created)
1685                 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1686         be_queue_free(adapter, q);
1687 }
1688
1689 /* One TX event queue is shared by all TX compl qs */
1690 static int be_tx_queues_create(struct be_adapter *adapter)
1691 {
1692         struct be_queue_info *eq, *q, *cq;
1693         struct be_tx_obj *txo;
1694         u8 i;
1695
1696         adapter->tx_eq.max_eqd = 0;
1697         adapter->tx_eq.min_eqd = 0;
1698         adapter->tx_eq.cur_eqd = 96;
1699         adapter->tx_eq.enable_aic = false;
1700
1701         eq = &adapter->tx_eq.q;
1702         if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1703                 sizeof(struct be_eq_entry)))
1704                 return -1;
1705
1706         if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1707                 goto err;
1708         adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1709
1710         for_all_tx_queues(adapter, txo, i) {
1711                 cq = &txo->cq;
1712                 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1713                         sizeof(struct be_eth_tx_compl)))
1714                         goto err;
1715
1716                 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1717                         goto err;
1718
1719                 q = &txo->q;
1720                 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1721                         sizeof(struct be_eth_wrb)))
1722                         goto err;
1723
1724                 if (be_cmd_txq_create(adapter, q, cq))
1725                         goto err;
1726         }
1727         return 0;
1728
1729 err:
1730         be_tx_queues_destroy(adapter);
1731         return -1;
1732 }
1733
1734 static void be_rx_queues_destroy(struct be_adapter *adapter)
1735 {
1736         struct be_queue_info *q;
1737         struct be_rx_obj *rxo;
1738         int i;
1739
1740         for_all_rx_queues(adapter, rxo, i) {
1741                 be_queue_free(adapter, &rxo->q);
1742
1743                 q = &rxo->cq;
1744                 if (q->created)
1745                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1746                 be_queue_free(adapter, q);
1747
1748                 q = &rxo->rx_eq.q;
1749                 if (q->created)
1750                         be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1751                 be_queue_free(adapter, q);
1752         }
1753 }
1754
1755 static u32 be_num_rxqs_want(struct be_adapter *adapter)
1756 {
1757         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1758                 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1759                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1760         } else {
1761                 dev_warn(&adapter->pdev->dev,
1762                         "No support for multiple RX queues\n");
1763                 return 1;
1764         }
1765 }
1766
1767 static int be_rx_queues_create(struct be_adapter *adapter)
1768 {
1769         struct be_queue_info *eq, *q, *cq;
1770         struct be_rx_obj *rxo;
1771         int rc, i;
1772
1773         adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1774                                 msix_enabled(adapter) ?
1775                                         adapter->num_msix_vec - 1 : 1);
1776         if (adapter->num_rx_qs != MAX_RX_QS)
1777                 dev_warn(&adapter->pdev->dev,
1778                         "Can create only %d RX queues\n", adapter->num_rx_qs);
1779
1780         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1781         for_all_rx_queues(adapter, rxo, i) {
1782                 rxo->adapter = adapter;
1783                 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1784                 rxo->rx_eq.enable_aic = true;
1785
1786                 /* EQ */
1787                 eq = &rxo->rx_eq.q;
1788                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1789                                         sizeof(struct be_eq_entry));
1790                 if (rc)
1791                         goto err;
1792
1793                 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1794                 if (rc)
1795                         goto err;
1796
1797                 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1798
1799                 /* CQ */
1800                 cq = &rxo->cq;
1801                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1802                                 sizeof(struct be_eth_rx_compl));
1803                 if (rc)
1804                         goto err;
1805
1806                 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1807                 if (rc)
1808                         goto err;
1809
1810                 /* Rx Q - will be created in be_open() */
1811                 q = &rxo->q;
1812                 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1813                                 sizeof(struct be_eth_rx_d));
1814                 if (rc)
1815                         goto err;
1816
1817         }
1818
1819         return 0;
1820 err:
1821         be_rx_queues_destroy(adapter);
1822         return -1;
1823 }
1824
1825 static bool event_peek(struct be_eq_obj *eq_obj)
1826 {
1827         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1828
1829         return eqe->evt != 0;
1832 }
1833
1834 static irqreturn_t be_intx(int irq, void *dev)
1835 {
1836         struct be_adapter *adapter = dev;
1837         struct be_rx_obj *rxo;
1838         int isr, i, tx = 0, rx = 0;
1839
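             /* Lancer does not expose the CEV_ISR register used below for
              * BE; peek the EQs directly to decide if this irq is ours */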
1840         if (lancer_chip(adapter)) {
1841                 if (event_peek(&adapter->tx_eq))
1842                         tx = event_handle(adapter, &adapter->tx_eq, false);
1843                 for_all_rx_queues(adapter, rxo, i) {
1844                         if (event_peek(&rxo->rx_eq))
1845                                 rx |= event_handle(adapter, &rxo->rx_eq, true);
1846                 }
1847
1848                 if (!(tx || rx))
1849                         return IRQ_NONE;
1850
1851         } else {
1852                 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1853                         (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1854                 if (!isr)
1855                         return IRQ_NONE;
1856
1857                 if ((1 << adapter->tx_eq.eq_idx & isr))
1858                         event_handle(adapter, &adapter->tx_eq, false);
1859
1860                 for_all_rx_queues(adapter, rxo, i) {
1861                         if ((1 << rxo->rx_eq.eq_idx & isr))
1862                                 event_handle(adapter, &rxo->rx_eq, true);
1863                 }
1864         }
1865
1866         return IRQ_HANDLED;
1867 }
1868
1869 static irqreturn_t be_msix_rx(int irq, void *dev)
1870 {
1871         struct be_rx_obj *rxo = dev;
1872         struct be_adapter *adapter = rxo->adapter;
1873
1874         event_handle(adapter, &rxo->rx_eq, true);
1875
1876         return IRQ_HANDLED;
1877 }
1878
1879 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1880 {
1881         struct be_adapter *adapter = dev;
1882
1883         event_handle(adapter, &adapter->tx_eq, false);
1884
1885         return IRQ_HANDLED;
1886 }
1887
1888 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1889 {
1890         return rxcp->tcpf && !rxcp->err;
1891 }
1892
1893 static int be_poll_rx(struct napi_struct *napi, int budget)
1894 {
1895         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1896         struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1897         struct be_adapter *adapter = rxo->adapter;
1898         struct be_queue_info *rx_cq = &rxo->cq;
1899         struct be_rx_compl_info *rxcp;
1900         u32 work_done;
1901
1902         rxo->stats.rx_polls++;
1903         for (work_done = 0; work_done < budget; work_done++) {
1904                 rxcp = be_rx_compl_get(rxo);
1905                 if (!rxcp)
1906                         break;
1907
1908                 /* Ignore flush completions */
1909                 if (rxcp->num_rcvd && rxcp->pkt_size) {
1910                         if (do_gro(rxcp))
1911                                 be_rx_compl_process_gro(adapter, rxo, rxcp);
1912                         else
1913                                 be_rx_compl_process(adapter, rxo, rxcp);
1914                 } else if (rxcp->pkt_size == 0) {
1915                         be_rx_compl_discard(adapter, rxo, rxcp);
1916                 }
1917
1918                 be_rx_stats_update(rxo, rxcp);
1919         }
1920
1921         /* Refill the queue */
1922         if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1923                 be_post_rx_frags(rxo, GFP_ATOMIC);
1924
1925         /* All consumed */
1926         if (work_done < budget) {
1927                 napi_complete(napi);
1928                 be_cq_notify(adapter, rx_cq->id, true, work_done);
1929         } else {
1930                 /* More to be consumed; continue with interrupts disabled */
1931                 be_cq_notify(adapter, rx_cq->id, false, work_done);
1932         }
1933         return work_done;
1934 }
1935
1936 /* As TX and MCC share the same EQ check for both TX and MCC completions.
1937  * For TX/MCC we don't honour budget; consume everything
1938  */
1939 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1940 {
1941         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1942         struct be_adapter *adapter =
1943                 container_of(tx_eq, struct be_adapter, tx_eq);
1944         struct be_tx_obj *txo;
1945         struct be_eth_tx_compl *txcp;
1946         int tx_compl, mcc_compl, status = 0;
1947         u8 i;
1948         u16 num_wrbs;
1949
1950         for_all_tx_queues(adapter, txo, i) {
1951                 tx_compl = 0;
1952                 num_wrbs = 0;
1953                 while ((txcp = be_tx_compl_get(&txo->cq))) {
1954                         num_wrbs += be_tx_compl_process(adapter, txo,
1955                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
1956                                         wrb_index, txcp));
1957                         tx_compl++;
1958                 }
1959                 if (tx_compl) {
1960                         be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1961
1962                         atomic_sub(num_wrbs, &txo->q.used);
1963
1964                         /* As Tx wrbs have been freed up, wake up netdev queue
1965                          * if it was stopped due to lack of tx wrbs.  */
1966                         if (__netif_subqueue_stopped(adapter->netdev, i) &&
1967                                 atomic_read(&txo->q.used) < txo->q.len / 2) {
1968                                 netif_wake_subqueue(adapter->netdev, i);
1969                         }
1970
1971                         adapter->drv_stats.be_tx_events++;
1972                         txo->stats.be_tx_compl += tx_compl;
1973                 }
1974         }
1975
1976         mcc_compl = be_process_mcc(adapter, &status);
1977
1978         if (mcc_compl) {
1979                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1980                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1981         }
1982
1983         napi_complete(napi);
1984
1985         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
1986         return 1;
1987 }
1988
1989 void be_detect_dump_ue(struct be_adapter *adapter)
1990 {
1991         u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1992         u32 i;
1993
1994         pci_read_config_dword(adapter->pdev,
1995                                 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1996         pci_read_config_dword(adapter->pdev,
1997                                 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1998         pci_read_config_dword(adapter->pdev,
1999                                 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
2000         pci_read_config_dword(adapter->pdev,
2001                                 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
2002
2003         ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
2004         ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
2005
2006         if (ue_status_lo || ue_status_hi) {
2007                 adapter->ue_detected = true;
2008                 adapter->eeh_err = true;
2009                 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
2010         }
2011
2012         if (ue_status_lo) {
2013                 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
2014                         if (ue_status_lo & 1)
2015                                 dev_err(&adapter->pdev->dev,
2016                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2017                 }
2018         }
2019         if (ue_status_hi) {
2020                 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
2021                         if (ue_status_hi & 1)
2022                                 dev_err(&adapter->pdev->dev,
2023                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2024                 }
2025         }
2027 }
2028
2029 static void be_worker(struct work_struct *work)
2030 {
2031         struct be_adapter *adapter =
2032                 container_of(work, struct be_adapter, work.work);
2033         struct be_rx_obj *rxo;
2034         struct be_tx_obj *txo;
2035         int i;
2036
2037         if (!adapter->ue_detected && !lancer_chip(adapter))
2038                 be_detect_dump_ue(adapter);
2039
2040         /* when interrupts are not yet enabled, just reap any pending
2041          * mcc completions */
2042         if (!netif_running(adapter->netdev)) {
2043                 int mcc_compl, status = 0;
2044
2045                 mcc_compl = be_process_mcc(adapter, &status);
2046
2047                 if (mcc_compl) {
2048                         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2049                         be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
2050                 }
2051
2052                 goto reschedule;
2053         }
2054
2055         if (!adapter->stats_cmd_sent) {
2056                 if (lancer_chip(adapter))
2057                         lancer_cmd_get_pport_stats(adapter,
2058                                                 &adapter->stats_cmd);
2059                 else
2060                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
2061         }
2062
2063         for_all_tx_queues(adapter, txo, i)
2064                 be_tx_rate_update(txo);
2065
2066         for_all_rx_queues(adapter, rxo, i) {
2067                 be_rx_rate_update(rxo);
2068                 be_rx_eqd_update(adapter, rxo);
2069
2070                 if (rxo->rx_post_starved) {
2071                         rxo->rx_post_starved = false;
2072                         be_post_rx_frags(rxo, GFP_KERNEL);
2073                 }
2074         }
2075
2076 reschedule:
2077         adapter->work_counter++;
2078         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2079 }
2080
2081 static void be_msix_disable(struct be_adapter *adapter)
2082 {
2083         if (msix_enabled(adapter)) {
2084                 pci_disable_msix(adapter->pdev);
2085                 adapter->num_msix_vec = 0;
2086         }
2087 }
2088
2089 static void be_msix_enable(struct be_adapter *adapter)
2090 {
2091 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
2092         int i, status, num_vec;
2093
2094         num_vec = be_num_rxqs_want(adapter) + 1;
2095
2096         for (i = 0; i < num_vec; i++)
2097                 adapter->msix_entries[i].entry = i;
2098
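             /* pci_enable_msix() returns 0 on success, a negative errno on
              * failure, or the number of vectors available when fewer than
              * requested could be allocated; retry with that smaller count */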
2099         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2100         if (status == 0) {
2101                 goto done;
2102         } else if (status >= BE_MIN_MSIX_VECTORS) {
2103                 num_vec = status;
2104                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2105                                 num_vec) == 0)
2106                         goto done;
2107         }
2108         return;
2109 done:
2110         adapter->num_msix_vec = num_vec;
2111         return;
2112 }
2113
2114 static void be_sriov_enable(struct be_adapter *adapter)
2115 {
2116         be_check_sriov_fn_type(adapter);
2117 #ifdef CONFIG_PCI_IOV
2118         if (be_physfn(adapter) && num_vfs) {
2119                 int status, pos;
2120                 u16 nvfs;
2121
2122                 pos = pci_find_ext_capability(adapter->pdev,
2123                                                 PCI_EXT_CAP_ID_SRIOV);
2124                 pci_read_config_word(adapter->pdev,
2125                                         pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2126
2127                 if (num_vfs > nvfs) {
2128                         dev_info(&adapter->pdev->dev,
2129                                         "Device supports %d VFs and not %d\n",
2130                                         nvfs, num_vfs);
2131                         num_vfs = nvfs;
2132                 }
2133
2134                 status = pci_enable_sriov(adapter->pdev, num_vfs);
2135                 adapter->sriov_enabled = !status;
2136         }
2137 #endif
2138 }
2139
2140 static void be_sriov_disable(struct be_adapter *adapter)
2141 {
2142 #ifdef CONFIG_PCI_IOV
2143         if (adapter->sriov_enabled) {
2144                 pci_disable_sriov(adapter->pdev);
2145                 adapter->sriov_enabled = false;
2146         }
2147 #endif
2148 }
2149
2150 static inline int be_msix_vec_get(struct be_adapter *adapter,
2151                                         struct be_eq_obj *eq_obj)
2152 {
2153         return adapter->msix_entries[eq_obj->eq_idx].vector;
2154 }
2155
2156 static int be_request_irq(struct be_adapter *adapter,
2157                 struct be_eq_obj *eq_obj,
2158                 void *handler, char *desc, void *context)
2159 {
2160         struct net_device *netdev = adapter->netdev;
2161         int vec;
2162
2163         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
2164         vec = be_msix_vec_get(adapter, eq_obj);
2165         return request_irq(vec, handler, 0, eq_obj->desc, context);
2166 }
2167
2168 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2169                         void *context)
2170 {
2171         int vec = be_msix_vec_get(adapter, eq_obj);
2172         free_irq(vec, context);
2173 }
2174
2175 static int be_msix_register(struct be_adapter *adapter)
2176 {
2177         struct be_rx_obj *rxo;
2178         int status, i;
2179         char qname[10];
2180
2181         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2182                                 adapter);
2183         if (status)
2184                 goto err;
2185
2186         for_all_rx_queues(adapter, rxo, i) {
2187                 sprintf(qname, "rxq%d", i);
2188                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2189                                 qname, rxo);
2190                 if (status)
2191                         goto err_msix;
2192         }
2193
2194         return 0;
2195
2196 err_msix:
2197         be_free_irq(adapter, &adapter->tx_eq, adapter);
2198
2199         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2200                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2201
2202 err:
2203         dev_warn(&adapter->pdev->dev,
2204                 "MSIX Request IRQ failed - err %d\n", status);
2205         be_msix_disable(adapter);
2206         return status;
2207 }
2208
2209 static int be_irq_register(struct be_adapter *adapter)
2210 {
2211         struct net_device *netdev = adapter->netdev;
2212         int status;
2213
2214         if (msix_enabled(adapter)) {
2215                 status = be_msix_register(adapter);
2216                 if (status == 0)
2217                         goto done;
2218                 /* INTx is not supported for VF */
2219                 if (!be_physfn(adapter))
2220                         return status;
2221         }
2222
2223         /* INTx */
2224         netdev->irq = adapter->pdev->irq;
2225         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2226                         adapter);
2227         if (status) {
2228                 dev_err(&adapter->pdev->dev,
2229                         "INTx request IRQ failed - err %d\n", status);
2230                 return status;
2231         }
2232 done:
2233         adapter->isr_registered = true;
2234         return 0;
2235 }
2236
2237 static void be_irq_unregister(struct be_adapter *adapter)
2238 {
2239         struct net_device *netdev = adapter->netdev;
2240         struct be_rx_obj *rxo;
2241         int i;
2242
2243         if (!adapter->isr_registered)
2244                 return;
2245
2246         /* INTx */
2247         if (!msix_enabled(adapter)) {
2248                 free_irq(netdev->irq, adapter);
2249                 goto done;
2250         }
2251
2252         /* MSIx */
2253         be_free_irq(adapter, &adapter->tx_eq, adapter);
2254
2255         for_all_rx_queues(adapter, rxo, i)
2256                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2257
2258 done:
2259         adapter->isr_registered = false;
2260 }
2261
2262 static void be_rx_queues_clear(struct be_adapter *adapter)
2263 {
2264         struct be_queue_info *q;
2265         struct be_rx_obj *rxo;
2266         int i;
2267
2268         for_all_rx_queues(adapter, rxo, i) {
2269                 q = &rxo->q;
2270                 if (q->created) {
2271                         be_cmd_rxq_destroy(adapter, q);
2272                         /* After the rxq is invalidated, wait for a grace time
2273                          * of 1ms for all dma to end and the flush compl to
2274                          * arrive
2275                          */
2276                         mdelay(1);
2277                         be_rx_q_clean(adapter, rxo);
2278                 }
2279
2280                 /* Clear any residual events */
2281                 q = &rxo->rx_eq.q;
2282                 if (q->created)
2283                         be_eq_clean(adapter, &rxo->rx_eq);
2284         }
2285 }
2286
2287 static int be_close(struct net_device *netdev)
2288 {
2289         struct be_adapter *adapter = netdev_priv(netdev);
2290         struct be_rx_obj *rxo;
2291         struct be_tx_obj *txo;
2292         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2293         int vec, i;
2294
2295         be_async_mcc_disable(adapter);
2296
2297         netif_carrier_off(netdev);
2298         adapter->link_up = false;
2299
2300         if (!lancer_chip(adapter))
2301                 be_intr_set(adapter, false);
2302
2303         for_all_rx_queues(adapter, rxo, i)
2304                 napi_disable(&rxo->rx_eq.napi);
2305
2306         napi_disable(&tx_eq->napi);
2307
2308         if (lancer_chip(adapter)) {
2309                 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2310                 for_all_rx_queues(adapter, rxo, i)
2311                         be_cq_notify(adapter, rxo->cq.id, false, 0);
2312                 for_all_tx_queues(adapter, txo, i)
2313                         be_cq_notify(adapter, txo->cq.id, false, 0);
2314         }
2315
2316         if (msix_enabled(adapter)) {
2317                 vec = be_msix_vec_get(adapter, tx_eq);
2318                 synchronize_irq(vec);
2319
2320                 for_all_rx_queues(adapter, rxo, i) {
2321                         vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2322                         synchronize_irq(vec);
2323                 }
2324         } else {
2325                 synchronize_irq(netdev->irq);
2326         }
2327         be_irq_unregister(adapter);
2328
2329         /* Wait for all pending tx completions to arrive so that
2330          * all tx skbs are freed.
2331          */
2332         for_all_tx_queues(adapter, txo, i)
2333                 be_tx_compl_clean(adapter, txo);
2334
2335         be_rx_queues_clear(adapter);
2336         return 0;
2337 }
2338
2339 static int be_rx_queues_setup(struct be_adapter *adapter)
2340 {
2341         struct be_rx_obj *rxo;
2342         int rc, i;
2343         u8 rsstable[MAX_RSS_QS];
2344
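             /* Queue 0 is the default non-RSS queue; RSS is enabled on the rest */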
2345         for_all_rx_queues(adapter, rxo, i) {
2346                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2347                         rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2348                         adapter->if_handle,
2349                         (i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
2350                 if (rc)
2351                         return rc;
2352         }
2353
2354         if (be_multi_rxq(adapter)) {
2355                 for_all_rss_queues(adapter, rxo, i)
2356                         rsstable[i] = rxo->rss_id;
2357
2358                 rc = be_cmd_rss_config(adapter, rsstable,
2359                         adapter->num_rx_qs - 1);
2360                 if (rc)
2361                         return rc;
2362         }
2363
2364         /* First time posting */
2365         for_all_rx_queues(adapter, rxo, i) {
2366                 be_post_rx_frags(rxo, GFP_KERNEL);
2367                 napi_enable(&rxo->rx_eq.napi);
2368         }
2369         return 0;
2370 }
2371
2372 static int be_open(struct net_device *netdev)
2373 {
2374         struct be_adapter *adapter = netdev_priv(netdev);
2375         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2376         struct be_rx_obj *rxo;
2377         bool link_up;
2378         int status, i;
2379         u8 mac_speed;
2380         u16 link_speed;
2381
2382         status = be_rx_queues_setup(adapter);
2383         if (status)
2384                 goto err;
2385
2386         napi_enable(&tx_eq->napi);
2387
2388         be_irq_register(adapter);
2389
2390         if (!lancer_chip(adapter))
2391                 be_intr_set(adapter, true);
2392
2393         /* The evt queues are created in unarmed state; arm them */
2394         for_all_rx_queues(adapter, rxo, i) {
2395                 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2396                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2397         }
2398         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2399
2400         /* Now that interrupts are on we can process async mcc */
2401         be_async_mcc_enable(adapter);
2402
2403         status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2404                         &link_speed, 0);
2405         if (status)
2406                 goto err;
2407         be_link_status_update(adapter, link_up);
2408
2409         if (be_physfn(adapter)) {
2410                 status = be_vid_config(adapter, false, 0);
2411                 if (status)
2412                         goto err;
2413
2414                 status = be_cmd_set_flow_control(adapter,
2415                                 adapter->tx_fc, adapter->rx_fc);
2416                 if (status)
2417                         goto err;
2418         }
2419
2420         return 0;
2421 err:
2422         be_close(adapter->netdev);
2423         return -EIO;
2424 }
2425
2426 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2427 {
2428         struct be_dma_mem cmd;
2429         int status = 0;
2430         u8 mac[ETH_ALEN];
2431
2432         memset(mac, 0, ETH_ALEN);
2433
2434         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2435         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2436                                     GFP_KERNEL);
2437         if (cmd.va == NULL)
2438                 return -1;
2439         memset(cmd.va, 0, cmd.size);
2440
2441         if (enable) {
2442                 status = pci_write_config_dword(adapter->pdev,
2443                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2444                 if (status) {
2445                         dev_err(&adapter->pdev->dev,
2446                                 "Could not enable Wake-on-LAN\n");
2447                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2448                                           cmd.dma);
2449                         return status;
2450                 }
2451                 status = be_cmd_enable_magic_wol(adapter,
2452                                 adapter->netdev->dev_addr, &cmd);
2453                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2454                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2455         } else {
2456                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2457                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2458                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2459         }
2460
2461         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2462         return status;
2463 }
2464
2465 /*
2466  * Generate a seed MAC address from the PF MAC address using jhash.
2467  * MAC addresses for the VFs are assigned incrementally, starting from the seed.
2468  * These addresses are programmed in the ASIC by the PF and the VF driver
2469  * queries for the MAC address during its probe.
2470  */
2471 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2472 {
2473         u32 vf = 0;
2474         int status = 0;
2475         u8 mac[ETH_ALEN];
2476
2477         be_vf_eth_addr_generate(adapter, mac);
2478
2479         for (vf = 0; vf < num_vfs; vf++) {
2480                 status = be_cmd_pmac_add(adapter, mac,
2481                                         adapter->vf_cfg[vf].vf_if_handle,
2482                                         &adapter->vf_cfg[vf].vf_pmac_id,
2483                                         vf + 1);
2484                 if (status)
2485                         dev_err(&adapter->pdev->dev,
2486                                 "Mac address add failed for VF %d\n", vf);
2487                 else
2488                         memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2489
2490                 mac[5] += 1;
2491         }
2492         return status;
2493 }
2494
2495 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2496 {
2497         u32 vf;
2498
2499         for (vf = 0; vf < num_vfs; vf++) {
2500                 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2501                         be_cmd_pmac_del(adapter,
2502                                         adapter->vf_cfg[vf].vf_if_handle,
2503                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2504         }
2505 }
2506
2507 static int be_setup(struct be_adapter *adapter)
2508 {
2509         struct net_device *netdev = adapter->netdev;
2510         u32 cap_flags, en_flags, vf = 0;
2511         int status;
2512         u8 mac[ETH_ALEN];
2513
2514         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2515                                 BE_IF_FLAGS_BROADCAST |
2516                                 BE_IF_FLAGS_MULTICAST;
2517
2518         if (be_physfn(adapter)) {
2519                 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2520                                 BE_IF_FLAGS_PROMISCUOUS |
2521                                 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2522                 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2523
2524                 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2525                         cap_flags |= BE_IF_FLAGS_RSS;
2526                         en_flags |= BE_IF_FLAGS_RSS;
2527                 }
2528         }
2529
2530         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2531                         netdev->dev_addr, false/* pmac_invalid */,
2532                         &adapter->if_handle, &adapter->pmac_id, 0);
2533         if (status != 0)
2534                 goto do_none;
2535
2536         if (be_physfn(adapter)) {
2537                 if (adapter->sriov_enabled) {
2538                         while (vf < num_vfs) {
2539                                 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2540                                                         BE_IF_FLAGS_BROADCAST;
2541                                 status = be_cmd_if_create(adapter, cap_flags,
2542                                         en_flags, mac, true,
2543                                         &adapter->vf_cfg[vf].vf_if_handle,
2544                                         NULL, vf+1);
2545                                 if (status) {
2546                                         dev_err(&adapter->pdev->dev,
2547                                         "Interface Create failed for VF %d\n",
2548                                         vf);
2549                                         goto if_destroy;
2550                                 }
2551                                 adapter->vf_cfg[vf].vf_pmac_id =
2552                                                         BE_INVALID_PMAC_ID;
2553                                 vf++;
2554                         }
2555                 }
2556         } else {
2557                 status = be_cmd_mac_addr_query(adapter, mac,
2558                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2559                 if (!status) {
2560                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2561                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2562                 }
2563         }
2564
2565         status = be_tx_queues_create(adapter);
2566         if (status != 0)
2567                 goto if_destroy;
2568
2569         status = be_rx_queues_create(adapter);
2570         if (status != 0)
2571                 goto tx_qs_destroy;
2572
2573         /* Allow all priorities by default. A GRP5 evt may modify this */
2574         adapter->vlan_prio_bmap = 0xff;
2575
2576         status = be_mcc_queues_create(adapter);
2577         if (status != 0)
2578                 goto rx_qs_destroy;
2579
2580         adapter->link_speed = -1;
2581
2582         return 0;
2583
2584 rx_qs_destroy:
2585         be_rx_queues_destroy(adapter);
2586 tx_qs_destroy:
2587         be_tx_queues_destroy(adapter);
2588 if_destroy:
2589         if (be_physfn(adapter) && adapter->sriov_enabled)
2590                 for (vf = 0; vf < num_vfs; vf++)
2591                         if (adapter->vf_cfg[vf].vf_if_handle)
2592                                 be_cmd_if_destroy(adapter,
2593                                         adapter->vf_cfg[vf].vf_if_handle,
2594                                         vf + 1);
2595         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2596 do_none:
2597         return status;
2598 }
2599
2600 static int be_clear(struct be_adapter *adapter)
2601 {
2602         int vf;
2603
2604         if (be_physfn(adapter) && adapter->sriov_enabled)
2605                 be_vf_eth_addr_rem(adapter);
2606
2607         be_mcc_queues_destroy(adapter);
2608         be_rx_queues_destroy(adapter);
2609         be_tx_queues_destroy(adapter);
2610         adapter->eq_next_idx = 0;
2611
2612         if (be_physfn(adapter) && adapter->sriov_enabled)
2613                 for (vf = 0; vf < num_vfs; vf++)
2614                         if (adapter->vf_cfg[vf].vf_if_handle)
2615                                 be_cmd_if_destroy(adapter,
2616                                         adapter->vf_cfg[vf].vf_if_handle,
2617                                         vf + 1);
2618
2619         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2620
2621         /* tell fw we're done with firing cmds */
2622         be_cmd_fw_clean(adapter);
2623         return 0;
2624 }
2625
2626
2627 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2628 static bool be_flash_redboot(struct be_adapter *adapter,
2629                         const u8 *p, u32 img_start, int image_size,
2630                         int hdr_size)
2631 {
2632         u32 crc_offset;
2633         u8 flashed_crc[4];
2634         int status;
2635
2636         crc_offset = hdr_size + img_start + image_size - 4;
2637
2638         p += crc_offset;
2639
2640         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2641                         (image_size - 4));
2642         if (status) {
2643                 dev_err(&adapter->pdev->dev,
2644                 "could not get crc from flash, not flashing redboot\n");
2645                 return false;
2646         }
2647
2648         /* update redboot only if crc does not match */
2649         return memcmp(flashed_crc, p, 4) != 0;
2653 }
2654
2655 static int be_flash_data(struct be_adapter *adapter,
2656                         const struct firmware *fw,
2657                         struct be_dma_mem *flash_cmd, int num_of_images)
2659 {
2660         int status = 0, i, filehdr_size = 0;
2661         u32 total_bytes = 0, flash_op;
2662         int num_bytes;
2663         const u8 *p = fw->data;
2664         struct be_cmd_write_flashrom *req = flash_cmd->va;
2665         const struct flash_comp *pflashcomp;
2666         int num_comp;
2667
2668         static const struct flash_comp gen3_flash_types[9] = {
2669                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2670                         FLASH_IMAGE_MAX_SIZE_g3},
2671                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2672                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2673                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2674                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2675                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2676                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2677                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2678                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2679                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2680                         FLASH_IMAGE_MAX_SIZE_g3},
2681                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2682                         FLASH_IMAGE_MAX_SIZE_g3},
2683                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2684                         FLASH_IMAGE_MAX_SIZE_g3},
2685                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2686                         FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2687         };
2688         static const struct flash_comp gen2_flash_types[8] = {
2689                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2690                         FLASH_IMAGE_MAX_SIZE_g2},
2691                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2692                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2693                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2694                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2695                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2696                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2697                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2698                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2699                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2700                         FLASH_IMAGE_MAX_SIZE_g2},
2701                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2702                         FLASH_IMAGE_MAX_SIZE_g2},
2703                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2704                          FLASH_IMAGE_MAX_SIZE_g2}
2705         };
2706
2707         if (adapter->generation == BE_GEN3) {
2708                 pflashcomp = gen3_flash_types;
2709                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2710                 num_comp = ARRAY_SIZE(gen3_flash_types);
2711         } else {
2712                 pflashcomp = gen2_flash_types;
2713                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2714                 num_comp = ARRAY_SIZE(gen2_flash_types);
2715         }
2716         for (i = 0; i < num_comp; i++) {
2717                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2718                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2719                         continue;
2720                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2721                         (!be_flash_redboot(adapter, fw->data,
2722                         pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2723                         (num_of_images * sizeof(struct image_hdr)))))
2724                         continue;
2725                 p = fw->data;
2726                 p += filehdr_size + pflashcomp[i].offset
2727                         + (num_of_images * sizeof(struct image_hdr));
2728                 if (p + pflashcomp[i].size > fw->data + fw->size)
2729                         return -1;
2730                 total_bytes = pflashcomp[i].size;
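                     /* Flash in 32KB chunks: OPER_SAVE buffers each chunk and
                      * the final chunk is sent with OPER_FLASH, which
                      * presumably commits the whole component to flash */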
2731                 while (total_bytes) {
2732                         if (total_bytes > 32*1024)
2733                                 num_bytes = 32*1024;
2734                         else
2735                                 num_bytes = total_bytes;
2736                         total_bytes -= num_bytes;
2737
2738                         if (!total_bytes)
2739                                 flash_op = FLASHROM_OPER_FLASH;
2740                         else
2741                                 flash_op = FLASHROM_OPER_SAVE;
2742                         memcpy(req->params.data_buf, p, num_bytes);
2743                         p += num_bytes;
2744                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2745                                 pflashcomp[i].optype, flash_op, num_bytes);
2746                         if (status) {
2747                                 dev_err(&adapter->pdev->dev,
2748                                         "cmd to write to flash rom failed.\n");
2749                                 return -1;
2750                         }
2751                 }
2752         }
2753         return 0;
2754 }
2755
2756 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2757 {
2758         if (fhdr == NULL)
2759                 return 0;
2760         if (fhdr->build[0] == '3')
2761                 return BE_GEN3;
2762         else if (fhdr->build[0] == '2')
2763                 return BE_GEN2;
2764         else
2765                 return 0;
2766 }
2767
2768 static int lancer_fw_download(struct be_adapter *adapter,
2769                                 const struct firmware *fw)
2770 {
2771 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
2772 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
2773         struct be_dma_mem flash_cmd;
2774         const u8 *data_ptr = NULL;
2775         u8 *dest_image_ptr = NULL;
2776         size_t image_size = 0;
2777         u32 chunk_size = 0;
2778         u32 data_written = 0;
2779         u32 offset = 0;
2780         int status = 0;
2781         u8 add_status = 0;
2782
2783         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2784                 dev_err(&adapter->pdev->dev,
2785                         "FW Image not properly aligned. "
2786                         "Length must be 4-byte aligned.\n");
2787                 status = -EINVAL;
2788                 goto lancer_fw_exit;
2789         }
2790
2791         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2792                                 + LANCER_FW_DOWNLOAD_CHUNK;
2793         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2794                                                 &flash_cmd.dma, GFP_KERNEL);
2795         if (!flash_cmd.va) {
2796                 status = -ENOMEM;
2797                 dev_err(&adapter->pdev->dev,
2798                         "Memory allocation failure while flashing\n");
2799                 goto lancer_fw_exit;
2800         }
2801
2802         dest_image_ptr = flash_cmd.va +
2803                                 sizeof(struct lancer_cmd_req_write_object);
2804         image_size = fw->size;
2805         data_ptr = fw->data;
2806
2807         while (image_size) {
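             /* Stream the image in 32KB chunks; the device reports how many
              * bytes it actually consumed through data_written */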
2808                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2809
2810                 /* Copy the image chunk content. */
2811                 memcpy(dest_image_ptr, data_ptr, chunk_size);
2812
2813                 status = lancer_cmd_write_object(adapter, &flash_cmd,
2814                                 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2815                                 &data_written, &add_status);
2816
2817                 if (status)
2818                         break;
2819
2820                 offset += data_written;
2821                 data_ptr += data_written;
2822                 image_size -= data_written;
2823         }
2824
2825         if (!status) {
2826                 /* Commit the firmware written so far */
2827                 status = lancer_cmd_write_object(adapter, &flash_cmd,
2828                                         0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2829                                         &data_written, &add_status);
2830         }
2831
2832         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2833                                 flash_cmd.dma);
2834         if (status) {
2835                 dev_err(&adapter->pdev->dev,
2836                         "Firmware load error. "
2837                         "Status code: 0x%x Additional Status: 0x%x\n",
2838                         status, add_status);
2839                 goto lancer_fw_exit;
2840         }
2841
2842         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2843 lancer_fw_exit:
2844         return status;
2845 }
2846
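/*
 * Flash a UFI image onto a BE2/BE3 chip.  The UFI generation (taken
 * from the file header) must match the adapter generation; a GEN3 UFI
 * carries a table of image headers and flashing is triggered for the
 * image with id 1, while a GEN2 UFI is flashed directly.
 */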
static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2848 {
2849         struct flash_file_hdr_g2 *fhdr;
2850         struct flash_file_hdr_g3 *fhdr3;
2851         struct image_hdr *img_hdr_ptr = NULL;
2852         struct be_dma_mem flash_cmd;
2853         const u8 *p;
2854         int status = 0, i = 0, num_imgs = 0;
2855
2856         p = fw->data;
2857         fhdr = (struct flash_file_hdr_g2 *) p;
2858
        flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32 * 1024;
2860         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2861                                           &flash_cmd.dma, GFP_KERNEL);
2862         if (!flash_cmd.va) {
2863                 status = -ENOMEM;
2864                 dev_err(&adapter->pdev->dev,
2865                         "Memory allocation failure while flashing\n");
2866                 goto be_fw_exit;
2867         }
2868
2869         if ((adapter->generation == BE_GEN3) &&
2870                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2871                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2872                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2873                 for (i = 0; i < num_imgs; i++) {
2874                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2875                                         (sizeof(struct flash_file_hdr_g3) +
2876                                          i * sizeof(struct image_hdr)));
2877                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2878                                 status = be_flash_data(adapter, fw, &flash_cmd,
2879                                                         num_imgs);
2880                 }
2881         } else if ((adapter->generation == BE_GEN2) &&
2882                         (get_ufigen_type(fhdr) == BE_GEN2)) {
2883                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
        } else {
                dev_err(&adapter->pdev->dev,
                        "UFI image and adapter interface are not compatible for flashing\n");
                status = -EINVAL;
        }
2889
2890         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2891                           flash_cmd.dma);
2892         if (status) {
2893                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2894                 goto be_fw_exit;
2895         }
2896
2897         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2898
2899 be_fw_exit:
2900         return status;
2901 }
2902
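/*
 * Entry point for firmware flashing (typically reached via ethtool's
 * flash-device operation).  The interface must be up; the image is
 * fetched from userspace with request_firmware() and handed to the
 * Lancer or BE-specific download routine.
 */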
2903 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2904 {
2905         const struct firmware *fw;
2906         int status;
2907
        if (!netif_running(adapter->netdev)) {
                dev_err(&adapter->pdev->dev,
                        "Firmware load not allowed (interface is down)\n");
                return -ENETDOWN;
        }
2913
2914         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2915         if (status)
2916                 goto fw_exit;
2917
2918         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2919
2920         if (lancer_chip(adapter))
2921                 status = lancer_fw_download(adapter, fw);
2922         else
2923                 status = be_fw_download(adapter, fw);
2924
2925 fw_exit:
2926         release_firmware(fw);
2927         return status;
2928 }
2929
static const struct net_device_ops be_netdev_ops = {
2931         .ndo_open               = be_open,
2932         .ndo_stop               = be_close,
2933         .ndo_start_xmit         = be_xmit,
2934         .ndo_set_rx_mode        = be_set_multicast_list,
2935         .ndo_set_mac_address    = be_mac_addr_set,
2936         .ndo_change_mtu         = be_change_mtu,
2937         .ndo_validate_addr      = eth_validate_addr,
2938         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
2939         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
2940         .ndo_set_vf_mac         = be_set_vf_mac,
2941         .ndo_set_vf_vlan        = be_set_vf_vlan,
2942         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
        .ndo_get_vf_config      = be_get_vf_config,
2944 };
2945
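/*
 * One-time netdev setup: advertise offload features (SG, TSO, checksum
 * offloads, VLAN acceleration, and RX hashing when multiple RX rings
 * are in use), default both flow-control directions to on, and
 * register one NAPI context per RX ring plus a shared one for TX
 * completions and the MCC.
 */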
2946 static void be_netdev_init(struct net_device *netdev)
2947 {
2948         struct be_adapter *adapter = netdev_priv(netdev);
2949         struct be_rx_obj *rxo;
2950         int i;
2951
2952         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2953                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2954                 NETIF_F_HW_VLAN_TX;
2955         if (be_multi_rxq(adapter))
2956                 netdev->hw_features |= NETIF_F_RXHASH;
2957
2958         netdev->features |= netdev->hw_features |
2959                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
2960
2961         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2962                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2963
2964         netdev->flags |= IFF_MULTICAST;
2965
2966         /* Default settings for Rx and Tx flow control */
2967         adapter->rx_fc = true;
2968         adapter->tx_fc = true;
2969
2970         netif_set_gso_max_size(netdev, 65535);
2971
2972         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2973
2974         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2975
2976         for_all_rx_queues(adapter, rxo, i)
2977                 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2978                                 BE_NAPI_WEIGHT);
2979
2980         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2981                 BE_NAPI_WEIGHT);
2982 }
2983
2984 static void be_unmap_pci_bars(struct be_adapter *adapter)
2985 {
2986         if (adapter->csr)
2987                 iounmap(adapter->csr);
2988         if (adapter->db)
2989                 iounmap(adapter->db);
2990         if (adapter->pcicfg && be_physfn(adapter))
2991                 iounmap(adapter->pcicfg);
2992 }
2993
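/*
 * Map the PCI BARs this function needs.  Lancer exposes everything it
 * uses through BAR 0.  On BE, the PF maps the CSR space from BAR 2;
 * the doorbell BAR is 4 on GEN2 and on GEN3 PFs but 0 on GEN3 VFs;
 * the pcicfg space is BAR 1 on GEN2 and BAR 0 on GEN3 for PFs, while
 * VFs reach it at a fixed offset inside the doorbell mapping.
 */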
2994 static int be_map_pci_bars(struct be_adapter *adapter)
2995 {
2996         u8 __iomem *addr;
2997         int pcicfg_reg, db_reg;
2998
2999         if (lancer_chip(adapter)) {
3000                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3001                         pci_resource_len(adapter->pdev, 0));
3002                 if (addr == NULL)
3003                         return -ENOMEM;
3004                 adapter->db = addr;
3005                 return 0;
3006         }
3007
3008         if (be_physfn(adapter)) {
3009                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3010                                 pci_resource_len(adapter->pdev, 2));
3011                 if (addr == NULL)
3012                         return -ENOMEM;
3013                 adapter->csr = addr;
3014         }
3015
3016         if (adapter->generation == BE_GEN2) {
3017                 pcicfg_reg = 1;
3018                 db_reg = 4;
3019         } else {
3020                 pcicfg_reg = 0;
3021                 if (be_physfn(adapter))
3022                         db_reg = 4;
3023                 else
3024                         db_reg = 0;
3025         }
3026         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3027                                 pci_resource_len(adapter->pdev, db_reg));
3028         if (addr == NULL)
3029                 goto pci_map_err;
3030         adapter->db = addr;
3031
3032         if (be_physfn(adapter)) {
3033                 addr = ioremap_nocache(
3034                                 pci_resource_start(adapter->pdev, pcicfg_reg),
3035                                 pci_resource_len(adapter->pdev, pcicfg_reg));
3036                 if (addr == NULL)
3037                         goto pci_map_err;
3038                 adapter->pcicfg = addr;
        } else {
                adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
        }
3041
3042         return 0;
3043 pci_map_err:
3044         be_unmap_pci_bars(adapter);
3045         return -ENOMEM;
3046 }
3047
3049 static void be_ctrl_cleanup(struct be_adapter *adapter)
3050 {
3051         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3052
3053         be_unmap_pci_bars(adapter);
3054
3055         if (mem->va)
3056                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3057                                   mem->dma);
3058
3059         mem = &adapter->mc_cmd_mem;
3060         if (mem->va)
3061                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3062                                   mem->dma);
3063 }
3064
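/*
 * Map BARs and allocate the DMA memory used for the bootstrap mailbox
 * and the multicast command.  The mailbox buffer is allocated 16 bytes
 * oversize so that PTR_ALIGN can place the mailbox itself on a 16-byte
 * boundary, which the mailbox interface appears to require.
 */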
3065 static int be_ctrl_init(struct be_adapter *adapter)
3066 {
3067         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3068         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3069         struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
3070         int status;
3071
3072         status = be_map_pci_bars(adapter);
3073         if (status)
3074                 goto done;
3075
3076         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3077         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3078                                                 mbox_mem_alloc->size,
3079                                                 &mbox_mem_alloc->dma,
3080                                                 GFP_KERNEL);
3081         if (!mbox_mem_alloc->va) {
3082                 status = -ENOMEM;
3083                 goto unmap_pci_bars;
3084         }
3085
3086         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3087         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3088         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3089         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3090
3091         mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
3092         mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
3093                                             mc_cmd_mem->size, &mc_cmd_mem->dma,
3094                                             GFP_KERNEL);
3095         if (mc_cmd_mem->va == NULL) {
3096                 status = -ENOMEM;
3097                 goto free_mbox;
3098         }
3099         memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
3100
3101         mutex_init(&adapter->mbox_lock);
3102         spin_lock_init(&adapter->mcc_lock);
3103         spin_lock_init(&adapter->mcc_cq_lock);
3104
3105         init_completion(&adapter->flash_compl);
3106         pci_save_state(adapter->pdev);
3107         return 0;
3108
3109 free_mbox:
3110         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3111                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3112
3113 unmap_pci_bars:
3114         be_unmap_pci_bars(adapter);
3115
3116 done:
3117         return status;
3118 }
3119
3120 static void be_stats_cleanup(struct be_adapter *adapter)
3121 {
3122         struct be_dma_mem *cmd = &adapter->stats_cmd;
3123
3124         if (cmd->va)
3125                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3126                                   cmd->va, cmd->dma);
3127 }
3128
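/*
 * Allocate the DMA buffer for the stats command.  The required size
 * depends on the chip: v0 of GET_STATS for GEN2, the pport-stats
 * request for Lancer, and v1 of GET_STATS for everything else.
 */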
3129 static int be_stats_init(struct be_adapter *adapter)
3130 {
3131         struct be_dma_mem *cmd = &adapter->stats_cmd;
3132
3133         if (adapter->generation == BE_GEN2) {
3134                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3135         } else {
3136                 if (lancer_chip(adapter))
3137                         cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3138                 else
3139                         cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3140         }
3141         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3142                                      GFP_KERNEL);
        if (cmd->va == NULL)
                return -ENOMEM;
3145         memset(cmd->va, 0, cmd->size);
3146         return 0;
3147 }
3148
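/*
 * Teardown mirrors probe in reverse: stop the worker, unregister the
 * netdev, destroy the queues and control structures, release SR-IOV
 * and MSI-X resources, then hand the PCI function back.
 */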
3149 static void __devexit be_remove(struct pci_dev *pdev)
3150 {
3151         struct be_adapter *adapter = pci_get_drvdata(pdev);
3152
3153         if (!adapter)
3154                 return;
3155
3156         cancel_delayed_work_sync(&adapter->work);
3157
3158         unregister_netdev(adapter->netdev);
3159
3160         be_clear(adapter);
3161
3162         be_stats_cleanup(adapter);
3163
3164         be_ctrl_cleanup(adapter);
3165
3166         kfree(adapter->vf_cfg);
3167         be_sriov_disable(adapter);
3168
3169         be_msix_disable(adapter);
3170
3171         pci_set_drvdata(pdev, NULL);
3172         pci_release_regions(pdev);
3173         pci_disable_device(pdev);
3174
3175         free_netdev(adapter->netdev);
3176 }
3177
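/*
 * Read the adapter's current configuration from firmware: FW version,
 * port number, function mode/capabilities, the permanent MAC address
 * (PFs always; VFs only on Lancer, where firmware assigns each VF a
 * default MAC), the VLAN filter budget and the usable TX queue count.
 */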
3178 static int be_get_config(struct be_adapter *adapter)
3179 {
3180         int status;
3181         u8 mac[ETH_ALEN];
3182
3183         status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
3184         if (status)
3185                 return status;
3186
3187         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3188                         &adapter->function_mode, &adapter->function_caps);
3189         if (status)
3190                 return status;
3191
3192         memset(mac, 0, ETH_ALEN);
3193
        /* A default permanent address is given to each VF for Lancer */
3195         if (be_physfn(adapter) || lancer_chip(adapter)) {
3196                 status = be_cmd_mac_addr_query(adapter, mac,
3197                         MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
3198
3199                 if (status)
3200                         return status;
3201
3202                 if (!is_valid_ether_addr(mac))
3203                         return -EADDRNOTAVAIL;
3204
3205                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3206                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3207         }
3208
        /* Bit 0x400 in function_mode appears to indicate multi-channel
         * (FLEX10-type) operation, where each function gets only a
         * quarter of the VLAN filter table.
         */
        if (adapter->function_mode & 0x400)
                adapter->max_vlans = BE_NUM_VLANS_SUPPORTED / 4;
        else
                adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3213
3214         status = be_cmd_get_cntl_attributes(adapter);
3215         if (status)
3216                 return status;
3217
3218         be_cmd_check_native_mode(adapter);
3219
3220         if ((num_vfs && adapter->sriov_enabled) ||
3221                 (adapter->function_mode & 0x400) ||
3222                 lancer_chip(adapter) || !be_physfn(adapter)) {
3223                 adapter->num_tx_qs = 1;
3224                 netif_set_real_num_tx_queues(adapter->netdev,
3225                         adapter->num_tx_qs);
3226         } else {
3227                 adapter->num_tx_qs = MAX_TX_QS;
3228         }
3229
3230         return 0;
3231 }
3232
3233 static int be_dev_family_check(struct be_adapter *adapter)
3234 {
3235         struct pci_dev *pdev = adapter->pdev;
3236         u32 sli_intf = 0, if_type;
3237
3238         switch (pdev->device) {
3239         case BE_DEVICE_ID1:
3240         case OC_DEVICE_ID1:
3241                 adapter->generation = BE_GEN2;
3242                 break;
3243         case BE_DEVICE_ID2:
3244         case OC_DEVICE_ID2:
3245                 adapter->generation = BE_GEN3;
3246                 break;
3247         case OC_DEVICE_ID3:
3248         case OC_DEVICE_ID4:
3249                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3250                 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3251                                                 SLI_INTF_IF_TYPE_SHIFT;
3252
3253                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3254                         if_type != 0x02) {
3255                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3256                         return -EINVAL;
3257                 }
3258                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3259                                          SLI_INTF_FAMILY_SHIFT);
3260                 adapter->generation = BE_GEN3;
3261                 break;
3262         default:
3263                 adapter->generation = 0;
3264         }
3265         return 0;
3266 }
3267
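/*
 * Poll the SLIPORT_STATUS register until the port reports ready,
 * giving up after 500 polls of ~20ms each (roughly 10 seconds).
 */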
3268 static int lancer_wait_ready(struct be_adapter *adapter)
3269 {
3270 #define SLIPORT_READY_TIMEOUT 500
3271         u32 sliport_status;
3272         int status = 0, i;
3273
3274         for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3275                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3276                 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3277                         break;
3278
3279                 msleep(20);
3280         }
3281
        if (i == SLIPORT_READY_TIMEOUT)
                status = -ETIMEDOUT;
3284
3285         return status;
3286 }
3287
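/*
 * Wait for the Lancer port to become ready.  If the port reports an
 * error together with "reset needed", request a port reset through
 * SLIPORT_CONTROL and poll again; a persisting error is fatal.
 */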
3288 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3289 {
3290         int status;
3291         u32 sliport_status, err, reset_needed;
3292         status = lancer_wait_ready(adapter);
3293         if (!status) {
3294                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3295                 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3296                 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3297                 if (err && reset_needed) {
3298                         iowrite32(SLI_PORT_CONTROL_IP_MASK,
3299                                         adapter->db + SLIPORT_CONTROL_OFFSET);
3300
                        /* check if the adapter has corrected the error */
3302                         status = lancer_wait_ready(adapter);
3303                         sliport_status = ioread32(adapter->db +
3304                                                         SLIPORT_STATUS_OFFSET);
3305                         sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3306                                                 SLIPORT_STATUS_RN_MASK);
3307                         if (status || sliport_status)
3308                                 status = -1;
3309                 } else if (err || reset_needed) {
3310                         status = -1;
3311                 }
3312         }
3313         return status;
3314 }
3315
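/*
 * Probe, in order: enable the PCI function and map its BARs, bring the
 * firmware to a ready state and reset the function, allocate the stats
 * DMA memory, read the adapter configuration, enable MSI-X, create the
 * queues via be_setup(), and finally register the netdev and kick off
 * the periodic worker.
 */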
3316 static int __devinit be_probe(struct pci_dev *pdev,
3317                         const struct pci_device_id *pdev_id)
3318 {
3319         int status = 0;
3320         struct be_adapter *adapter;
3321         struct net_device *netdev;
3322
3323         status = pci_enable_device(pdev);
3324         if (status)
3325                 goto do_none;
3326
3327         status = pci_request_regions(pdev, DRV_NAME);
3328         if (status)
3329                 goto disable_dev;
3330         pci_set_master(pdev);
3331
3332         netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
3333         if (netdev == NULL) {
3334                 status = -ENOMEM;
3335                 goto rel_reg;
3336         }
3337         adapter = netdev_priv(netdev);
3338         adapter->pdev = pdev;
3339         pci_set_drvdata(pdev, adapter);
3340
3341         status = be_dev_family_check(adapter);
3342         if (status)
3343                 goto free_netdev;
3344
3345         adapter->netdev = netdev;
3346         SET_NETDEV_DEV(netdev, &pdev->dev);
3347
3348         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3349         if (!status) {
3350                 netdev->features |= NETIF_F_HIGHDMA;
3351         } else {
3352                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3353                 if (status) {
3354                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3355                         goto free_netdev;
3356                 }
3357         }
3358
3359         be_sriov_enable(adapter);
        if (adapter->sriov_enabled) {
                adapter->vf_cfg = kcalloc(num_vfs,
                        sizeof(struct be_vf_cfg), GFP_KERNEL);

                if (!adapter->vf_cfg) {
                        /* status is still 0 here; without setting it,
                         * probe would report success on this failure.
                         */
                        status = -ENOMEM;
                        goto free_netdev;
                }
        }
3367
3368         status = be_ctrl_init(adapter);
3369         if (status)
3370                 goto free_vf_cfg;
3371
3372         if (lancer_chip(adapter)) {
3373                 status = lancer_test_and_set_rdy_state(adapter);
3374                 if (status) {
3375                         dev_err(&pdev->dev, "Adapter in non recoverable error\n");
3376                         goto ctrl_clean;
3377                 }
3378         }
3379
3380         /* sync up with fw's ready state */
3381         if (be_physfn(adapter)) {
3382                 status = be_cmd_POST(adapter);
3383                 if (status)
3384                         goto ctrl_clean;
3385         }
3386
3387         /* tell fw we're ready to fire cmds */
3388         status = be_cmd_fw_init(adapter);
3389         if (status)
3390                 goto ctrl_clean;
3391
3392         status = be_cmd_reset_function(adapter);
3393         if (status)
3394                 goto ctrl_clean;
3395
3396         status = be_stats_init(adapter);
3397         if (status)
3398                 goto ctrl_clean;
3399
3400         status = be_get_config(adapter);
3401         if (status)
3402                 goto stats_clean;
3403
3404         /* The INTR bit may be set in the card when probed by a kdump kernel
3405          * after a crash.
3406          */
3407         if (!lancer_chip(adapter))
3408                 be_intr_set(adapter, false);
3409
3410         be_msix_enable(adapter);
3411
3412         INIT_DELAYED_WORK(&adapter->work, be_worker);
3413
3414         status = be_setup(adapter);
3415         if (status)
3416                 goto msix_disable;
3417
3418         be_netdev_init(netdev);
3419         status = register_netdev(netdev);
3420         if (status != 0)
3421                 goto unsetup;
3422         netif_carrier_off(netdev);
3423
3424         if (be_physfn(adapter) && adapter->sriov_enabled) {
3425                 u8 mac_speed;
3426                 bool link_up;
3427                 u16 vf, lnk_speed;
3428
3429                 if (!lancer_chip(adapter)) {
3430                         status = be_vf_eth_addr_config(adapter);
3431                         if (status)
3432                                 goto unreg_netdev;
3433                 }
3434
3435                 for (vf = 0; vf < num_vfs; vf++) {
3436                         status = be_cmd_link_status_query(adapter, &link_up,
3437                                         &mac_speed, &lnk_speed, vf + 1);
3438                         if (!status)
3439                                 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
3440                         else
3441                                 goto unreg_netdev;
3442                 }
3443         }
3444
3445         dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3446
3447         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3448         return 0;
3449
3450 unreg_netdev:
3451         unregister_netdev(netdev);
3452 unsetup:
3453         be_clear(adapter);
3454 msix_disable:
3455         be_msix_disable(adapter);
3456 stats_clean:
3457         be_stats_cleanup(adapter);
3458 ctrl_clean:
3459         be_ctrl_cleanup(adapter);
3460 free_vf_cfg:
3461         kfree(adapter->vf_cfg);
3462 free_netdev:
3463         be_sriov_disable(adapter);
3464         free_netdev(netdev);
3465         pci_set_drvdata(pdev, NULL);
3466 rel_reg:
3467         pci_release_regions(pdev);
3468 disable_dev:
3469         pci_disable_device(pdev);
3470 do_none:
3471         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3472         return status;
3473 }
3474
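/*
 * Legacy PM hooks.  Suspend arms wake-on-lan if configured, closes the
 * interface and tears down all queues; resume re-initializes the
 * firmware interface, rebuilds the queues with be_setup() and reopens
 * the interface.
 */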
3475 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3476 {
3477         struct be_adapter *adapter = pci_get_drvdata(pdev);
3478         struct net_device *netdev =  adapter->netdev;
3479
3480         cancel_delayed_work_sync(&adapter->work);
3481         if (adapter->wol)
3482                 be_setup_wol(adapter, true);
3483
3484         netif_device_detach(netdev);
3485         if (netif_running(netdev)) {
3486                 rtnl_lock();
3487                 be_close(netdev);
3488                 rtnl_unlock();
3489         }
3490         be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
3491         be_clear(adapter);
3492
3493         be_msix_disable(adapter);
3494         pci_save_state(pdev);
3495         pci_disable_device(pdev);
3496         pci_set_power_state(pdev, pci_choose_state(pdev, state));
3497         return 0;
3498 }
3499
3500 static int be_resume(struct pci_dev *pdev)
3501 {
3502         int status = 0;
3503         struct be_adapter *adapter = pci_get_drvdata(pdev);
3504         struct net_device *netdev =  adapter->netdev;
3505
3506         netif_device_detach(netdev);
3507
3508         status = pci_enable_device(pdev);
3509         if (status)
3510                 return status;
3511
        pci_set_power_state(pdev, PCI_D0);
3513         pci_restore_state(pdev);
3514
3515         be_msix_enable(adapter);
3516         /* tell fw we're ready to fire cmds */
3517         status = be_cmd_fw_init(adapter);
3518         if (status)
3519                 return status;
3520
3521         be_setup(adapter);
3522         if (netif_running(netdev)) {
3523                 rtnl_lock();
3524                 be_open(netdev);
3525                 rtnl_unlock();
3526         }
3527         netif_device_attach(netdev);
3528
3529         if (adapter->wol)
3530                 be_setup_wol(adapter, false);
3531
3532         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3533         return 0;
3534 }
3535
/*
 * An FLR (function-level reset) stops the controller from DMAing any
 * further data; be_shutdown() resets the function so the device is
 * quiescent before the next kernel (e.g. kdump) takes over.
 */
3539 static void be_shutdown(struct pci_dev *pdev)
3540 {
3541         struct be_adapter *adapter = pci_get_drvdata(pdev);
3542
3543         if (!adapter)
3544                 return;
3545
3546         cancel_delayed_work_sync(&adapter->work);
3547
3548         netif_device_detach(adapter->netdev);
3549
3550         if (adapter->wol)
3551                 be_setup_wol(adapter, true);
3552
3553         be_cmd_reset_function(adapter);
3554
3555         pci_disable_device(pdev);
3556 }
3557
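/*
 * PCI error (EEH) recovery: on a detected error the netdev is detached
 * and the queues torn down; slot reset re-enables the device and
 * re-runs POST to check that the card and firmware are sane; resume
 * rebuilds the queues and reattaches the netdev.
 */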
3558 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3559                                 pci_channel_state_t state)
3560 {
3561         struct be_adapter *adapter = pci_get_drvdata(pdev);
3562         struct net_device *netdev =  adapter->netdev;
3563
3564         dev_err(&adapter->pdev->dev, "EEH error detected\n");
3565
3566         adapter->eeh_err = true;
3567
3568         netif_device_detach(netdev);
3569
3570         if (netif_running(netdev)) {
3571                 rtnl_lock();
3572                 be_close(netdev);
3573                 rtnl_unlock();
3574         }
3575         be_clear(adapter);
3576
3577         if (state == pci_channel_io_perm_failure)
3578                 return PCI_ERS_RESULT_DISCONNECT;
3579
3580         pci_disable_device(pdev);
3581
3582         return PCI_ERS_RESULT_NEED_RESET;
3583 }
3584
3585 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3586 {
3587         struct be_adapter *adapter = pci_get_drvdata(pdev);
3588         int status;
3589
3590         dev_info(&adapter->pdev->dev, "EEH reset\n");
3591         adapter->eeh_err = false;
3592
3593         status = pci_enable_device(pdev);
3594         if (status)
3595                 return PCI_ERS_RESULT_DISCONNECT;
3596
3597         pci_set_master(pdev);
        pci_set_power_state(pdev, PCI_D0);
3599         pci_restore_state(pdev);
3600
3601         /* Check if card is ok and fw is ready */
3602         status = be_cmd_POST(adapter);
3603         if (status)
3604                 return PCI_ERS_RESULT_DISCONNECT;
3605
3606         return PCI_ERS_RESULT_RECOVERED;
3607 }
3608
3609 static void be_eeh_resume(struct pci_dev *pdev)
3610 {
3611         int status = 0;
3612         struct be_adapter *adapter = pci_get_drvdata(pdev);
3613         struct net_device *netdev =  adapter->netdev;
3614
3615         dev_info(&adapter->pdev->dev, "EEH resume\n");
3616
3617         pci_save_state(pdev);
3618
3619         /* tell fw we're ready to fire cmds */
3620         status = be_cmd_fw_init(adapter);
3621         if (status)
3622                 goto err;
3623
3624         status = be_setup(adapter);
3625         if (status)
3626                 goto err;
3627
3628         if (netif_running(netdev)) {
3629                 status = be_open(netdev);
3630                 if (status)
3631                         goto err;
3632         }
3633         netif_device_attach(netdev);
3634         return;
3635 err:
3636         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3637 }
3638
3639 static struct pci_error_handlers be_eeh_handlers = {
3640         .error_detected = be_eeh_err_detected,
3641         .slot_reset = be_eeh_reset,
3642         .resume = be_eeh_resume,
3643 };
3644
3645 static struct pci_driver be_driver = {
3646         .name = DRV_NAME,
3647         .id_table = be_dev_ids,
3648         .probe = be_probe,
3649         .remove = be_remove,
3650         .suspend = be_suspend,
3651         .resume = be_resume,
3652         .shutdown = be_shutdown,
3653         .err_handler = &be_eeh_handlers
3654 };
3655
3656 static int __init be_init_module(void)
3657 {
        if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
            rx_frag_size != 2048) {
                printk(KERN_WARNING DRV_NAME
                        ": module param rx_frag_size must be 2048, 4096 or 8192; using 2048\n");
                rx_frag_size = 2048;
        }
3665
3666         return pci_register_driver(&be_driver);
3667 }
3668 module_init(be_init_module);
3669
3670 static void __exit be_exit_module(void)
3671 {
3672         pci_unregister_driver(&be_driver);
3673 }
3674 module_exit(be_exit_module);