/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

/* Is BE in multi-channel mode? */
static inline bool be_is_mc(struct be_adapter *adapter)
{
        return (adapter->function_mode & FLEX10_MODE ||
                adapter->function_mode & VNIC_MODE ||
                adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        memset(mem->va, 0, mem->size);
        return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        if (adapter->eeh_err)
                return;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                                &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                        PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;
        u8 current_mac[ETH_ALEN];
        u32 pmac_id = adapter->pmac_id[0];

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        status = be_cmd_mac_addr_query(adapter, current_mac,
                                MAC_ADDRESS_TYPE_NETWORK, false,
                                adapter->if_handle, 0);
        if (status)
                goto err;

        if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
                status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                adapter->if_handle, &adapter->pmac_id[0], 0);
                if (status)
                        goto err;

                be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
        }
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        return 0;
err:
        dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
        return status;
}

static void populate_be2_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_mismatch_drops =
                                        port_stats->rx_address_mismatch_drops +
                                        port_stats->rx_vlan_mismatch_drops;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_mismatch_drops =
                                        pport_stats->rx_address_mismatch_drops +
                                        pport_stats->rx_vlan_mismatch_drops;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   (x & 0xFFFF)
#define hi(x)                   (x & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}
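
/* Worked example of the wrap-around handling above (illustrative numbers):
 * if *acc == 0x0001FFF0 (one prior wrap, low word 0xFFF0) and the HW counter
 * now reads val == 0x0010, then val < lo(*acc), so the 16-bit counter must
 * have wrapped; newacc = hi(*acc) + 0x0010 + 65536 = 0x00020010.
 */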

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;

        if (adapter->generation == BE_GEN3) {
                if (lancer_chip(adapter))
                        populate_lancer_stats(adapter);
                else
                        populate_be3_stats(adapter);
        } else {
                populate_be2_stats(adapter);
        }

        if (lancer_chip(adapter))
                goto done;

        /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
        for_all_rx_queues(adapter, rxo, i) {
                /* below erx HW counter can actually wrap around after
                 * 65535. Driver accumulates a 32-bit value
                 */
                accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                (u16)erx->rx_drops_no_fragments[rxo->q.id]);
        }
done:
        return;
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);
                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);
                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if ((link_status & LINK_STATUS_MASK) == LINK_UP)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}
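
/* Illustrative count: an skb with linear data and two page frags needs
 * 1 + 2 data WRBs plus 1 header WRB = 4, which is already even, so no dummy
 * WRB is added. With one page frag the total would be 3, and on non-Lancer
 * chips a dummy WRB is appended to keep the count even.
 */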

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                        struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If vlan priority provided by OS is NOT in available bmap */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                adapter->recommended_prio;

        return vlan_tag;
}
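
/* Illustrative example: with VLAN_PRIO_SHIFT == 13, a TCI of 0x6005 carries
 * priority 3 and VID 5. If bit 3 of adapter->vlan_prio_bmap is clear, the
 * priority bits are overwritten with adapter->recommended_prio while VID 5
 * is preserved.
 */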

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u16 vlan_tag;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
                if (lancer_chip(adapter) && adapter->sli_family ==
                                                        LANCER_A0_SLI_FAMILY) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        if (is_tcp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                tcpcs, hdr, 1);
                        else if (is_udp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                udpcs, hdr, 1);
                }
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        /* For vlan tagged pkts, BE
         * 1) calculates checksum even when CSO is not requested
         * 2) calculates checksum wrongly for padded pkt less than
         * 60 bytes long.
         * As a workaround disable TX vlan offloading in such cases.
         */
        if (vlan_tx_tag_present(skb) &&
            (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60)) {
                skb = skb_share_check(skb, GFP_ATOMIC);
                if (unlikely(!skb))
                        goto tx_drop;

                skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
                if (unlikely(!skb))
                        goto tx_drop;

                skb->vlan_tci = 0;
        }

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
tx_drop:
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        u16 num = 0, i;
        int status = 0;

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added > adapter->max_vlans)
                goto set_vlan_promisc;

        /* Construct VLAN Table to give to HW */
        for (i = 0; i < VLAN_N_VID; i++)
                if (adapter->vlan_tag[i])
                        vids[num++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    vids, num, 1, 0);

        /* Set to VLAN promisc mode as setting VLAN filter failed */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
                goto set_vlan_promisc;
        }

        return status;

set_vlan_promisc:
        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    NULL, 0, 1, 1);
        return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added++;
        else
                adapter->vlan_tag[vid] = 0;
ret:
        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added--;
        else
                adapter->vlan_tag[vid] = 1;
ret:
        return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
                        netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        if (lancer_chip(adapter)) {
                status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
        } else {
                status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                         vf_cfg->pmac_id, vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                                mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
        vi->vlan = vf_cfg->vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095)
                return -EINVAL;

        if (vlan) {
                if (adapter->vf_cfg[vf].vlan_tag != vlan) {
                        /* If this is new value, program it. Else skip. */
                        adapter->vf_cfg[vf].vlan_tag = vlan;

                        status = be_cmd_set_hsw_config(adapter, vlan,
                                vf + 1, adapter->vf_cfg[vf].if_handle);
                }
        } else {
                /* Reset Transparent Vlan Tagging. */
                adapter->vf_cfg[vf].vlan_tag = 0;
                vlan = adapter->vf_cfg[vf].def_vid;
                status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                        adapter->vf_cfg[vf].if_handle);
        }

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (rate < 100 || rate > 10000) {
                dev_err(&adapter->pdev->dev,
                        "tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }

        status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
}

static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
        struct pci_dev *dev, *pdev = adapter->pdev;
        int vfs = 0, assigned_vfs = 0, pos, vf_fn;
        u16 offset, stride;

        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
        if (!pos)
                return 0;
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

        dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
        while (dev) {
                vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
                if (dev->is_virtfn && dev->devfn == vf_fn) {
                        vfs++;
                        if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
                                assigned_vfs++;
                }
                dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
        }
        return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}
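
/* Illustrative arithmetic for the VF match above: per the PCIe SR-IOV spec,
 * VF n of this PF sits at function number devfn + VF_OFFSET + VF_STRIDE * n.
 * E.g. with offset 128 and stride 2, a PF at devfn 0 matches VFs at devfn
 * 128, 130, 132, ... as the loop walks the device list.
 */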

static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
        struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
        ulong now = jiffies;
        ulong delta = now - stats->rx_jiffies;
        u64 pkts;
        unsigned int start, eqd;

        if (!eqo->enable_aic) {
                eqd = eqo->eqd;
                goto modify_eqd;
        }

        if (eqo->idx >= adapter->num_rx_qs)
                return;

        stats = rx_stats(&adapter->rx_obj[eqo->idx]);

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update once a second */
        if (delta < HZ)
                return;

        do {
                start = u64_stats_fetch_begin_bh(&stats->sync);
                pkts = stats->rx_pkts;
        } while (u64_stats_fetch_retry_bh(&stats->sync, start));

        stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
        stats->rx_pkts_prev = pkts;
        stats->rx_jiffies = now;
        eqd = (stats->rx_pps / 110000) << 3;
        eqd = min(eqd, eqo->max_eqd);
        eqd = max(eqd, eqo->min_eqd);
        if (eqd < 10)
                eqd = 0;

modify_eqd:
        if (eqd != eqo->cur_eqd) {
                be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
                eqo->cur_eqd = eqd;
        }
}
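
/* Illustrative numbers for the adaptive EQ-delay math above: at
 * rx_pps == 440000, eqd = (440000 / 110000) << 3 = 32, then clamped to
 * [min_eqd, max_eqd]. Below 220000 pps the computed value is under 10,
 * so interrupt delay is disabled entirely (eqd = 0).
 */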

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}
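
/* Illustrative cases for csum_passed(): TCP over IPv4 needs both l4_csum and
 * ip_csum set; TCP over IPv6 needs only l4_csum, since IPv6 has no header
 * checksum and the ipv6 bit short-circuits the ip_csum test; a non-TCP/UDP
 * packet is never reported as verified.
 */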

static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
                                                u16 frag_idx)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
                                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
                             struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min(BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_frag_set_page(skb, 0, page_info->page);
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
                skb->data_len = curr_frag_len - hdr_len;
                skb->truesize += rx_frag_size;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}
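
/* Illustrative walk-through of the coalescing above: with rx_frag_size ==
 * 2048 and 4K pages, be_post_rx_frags() carves two frags out of each page
 * (offsets 0 and 2048). A 6000-byte frame spans 3 RX frags; the second sits
 * at page_offset 2048, so it is merged into the same skb frag slot as the
 * first and its page ref dropped, while the third starts a fresh page and
 * occupies a new slot.
 */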

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
                                struct be_rx_compl_info *rxcp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
        if (unlikely(!skb)) {
                rx_stats(rxo)->rx_drops_no_skbs++;
                be_rx_compl_discard(rxo, rxcp);
                return;
        }

        skb_fill_rx_data(rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->protocol = eth_type_trans(skb, netdev);
        skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
        if (netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
                             struct be_rx_compl_info *rxcp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        u16 remaining, curr_frag_len;
        u16 i, j;

        skb = napi_get_frags(napi);
        if (!skb) {
                be_rx_compl_discard(rxo, rxcp);
                return;
        }

        remaining = rxcp->pkt_size;
        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                } else {
                        put_page(page_info->page);
                }
                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = rxcp->pkt_size;
        skb->data_len = rxcp->pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        napi_gro_frags(napi);
}

static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
                                 struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
                                               compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
                                 struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1401         rxcp->rss_hash =
1402                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1403         if (rxcp->vlanf) {
1404                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1405                                           compl);
1406                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1407                                                compl);
1408         }
1409         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1410 }
1411
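/* Consume one entry from the RX completion queue, if available. The pattern
 * is: peek at the valid bit, issue a read barrier so the descriptor body is
 * not read ahead of the valid bit, byte-swap and parse the entry, clear the
 * valid bit so it is not seen twice, then advance the CQ tail.
 */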
1412 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1413 {
1414         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1415         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1416         struct be_adapter *adapter = rxo->adapter;
1417
1418         /* For checking the valid bit it is OK to use either definition, as
1419          * the valid bit is at the same position in both v0 and v1 Rx compls */
1420         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1421                 return NULL;
1422
1423         rmb();
1424         be_dws_le_to_cpu(compl, sizeof(*compl));
1425
1426         if (adapter->be3_native)
1427                 be_parse_rx_compl_v1(compl, rxcp);
1428         else
1429                 be_parse_rx_compl_v0(compl, rxcp);
1430
1431         if (rxcp->vlanf) {
1432                 /* vlanf could be wrongly set in some cards;
1433                  * ignore it if vtm is not set */
1434                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1435                         rxcp->vlanf = 0;
1436
1437                 if (!lancer_chip(adapter))
1438                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1439
1440                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1441                     !adapter->vlan_tag[rxcp->vlan_tag])
1442                         rxcp->vlanf = 0;
1443         }
1444
1445         /* As the compl has been parsed, reset it; we won't touch it again */
1446         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1447
1448         queue_tail_inc(&rxo->cq);
1449         return rxcp;
1450 }
1451
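/* Allocate a (possibly multi-order) page for RX fragments. __GFP_COMP is
 * set for order > 0 allocations so the multi-page allocation behaves as a
 * single compound page for the get_page()/put_page() references that are
 * taken per rx_frag_size fragment.
 */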
1452 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1453 {
1454         u32 order = get_order(size);
1455
1456         if (order > 0)
1457                 gfp |= __GFP_COMP;
1458         return  alloc_pages(gfp, order);
1459 }
1460
1461 /*
1462  * Allocate a page, split it to fragments of size rx_frag_size and post as
1463  * receive buffers to BE
1464  */
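/* Illustrative numbers (assuming the defaults): with rx_frag_size = 2048 and
 * a 4K PAGE_SIZE, get_order(2048) is 0, so big_page_size equals PAGE_SIZE
 * and each page yields two fragments; every fragment after the first takes
 * an extra reference on the page via get_page().
 */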
1465 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1466 {
1467         struct be_adapter *adapter = rxo->adapter;
1468         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1469         struct be_queue_info *rxq = &rxo->q;
1470         struct page *pagep = NULL;
1471         struct be_eth_rx_d *rxd;
1472         u64 page_dmaaddr = 0, frag_dmaaddr;
1473         u32 posted, page_offset = 0;
1474
1475         page_info = &rxo->page_info_tbl[rxq->head];
1476         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1477                 if (!pagep) {
1478                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1479                         if (unlikely(!pagep)) {
1480                                 rx_stats(rxo)->rx_post_fail++;
1481                                 break;
1482                         }
1483                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1484                                                     0, adapter->big_page_size,
1485                                                     DMA_FROM_DEVICE);
1486                         page_info->page_offset = 0;
1487                 } else {
1488                         get_page(pagep);
1489                         page_info->page_offset = page_offset + rx_frag_size;
1490                 }
1491                 page_offset = page_info->page_offset;
1492                 page_info->page = pagep;
1493                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1494                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1495
1496                 rxd = queue_head_node(rxq);
1497                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1498                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1499
1500                 /* Any space left in the current big page for another frag? */
1501                 if ((page_offset + rx_frag_size + rx_frag_size) >
1502                                         adapter->big_page_size) {
1503                         pagep = NULL;
1504                         page_info->last_page_user = true;
1505                 }
1506
1507                 prev_page_info = page_info;
1508                 queue_head_inc(rxq);
1509                 page_info = &rxo->page_info_tbl[rxq->head];
1510         }
1511         if (pagep)
1512                 prev_page_info->last_page_user = true;
1513
1514         if (posted) {
1515                 atomic_add(posted, &rxq->used);
1516                 be_rxq_notify(adapter, rxq->id, posted);
1517         } else if (atomic_read(&rxq->used) == 0) {
1518                 /* Let be_worker replenish when memory is available */
1519                 rxo->rx_post_starved = true;
1520         }
1521 }
1522
1523 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1524 {
1525         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1526
1527         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1528                 return NULL;
1529
1530         rmb();
1531         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1532
1533         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1534
1535         queue_tail_inc(tx_cq);
1536         return txcp;
1537 }
1538
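/* Reclaim the wrbs used by one transmitted skb: walk the TX queue from its
 * tail up to last_index (taken from the completion), unmapping each wrb's
 * DMA address, then free the skb. Returns the number of wrbs reclaimed,
 * including the header wrb.
 */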
1539 static u16 be_tx_compl_process(struct be_adapter *adapter,
1540                 struct be_tx_obj *txo, u16 last_index)
1541 {
1542         struct be_queue_info *txq = &txo->q;
1543         struct be_eth_wrb *wrb;
1544         struct sk_buff **sent_skbs = txo->sent_skb_list;
1545         struct sk_buff *sent_skb;
1546         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1547         bool unmap_skb_hdr = true;
1548
1549         sent_skb = sent_skbs[txq->tail];
1550         BUG_ON(!sent_skb);
1551         sent_skbs[txq->tail] = NULL;
1552
1553         /* skip header wrb */
1554         queue_tail_inc(txq);
1555
1556         do {
1557                 cur_index = txq->tail;
1558                 wrb = queue_tail_node(txq);
1559                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1560                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1561                 unmap_skb_hdr = false;
1562
1563                 num_wrbs++;
1564                 queue_tail_inc(txq);
1565         } while (cur_index != last_index);
1566
1567         kfree_skb(sent_skb);
1568         return num_wrbs;
1569 }
1570
1571 /* Count and consume the pending entries in the event queue */
1572 static inline int events_get(struct be_eq_obj *eqo)
1573 {
1574         struct be_eq_entry *eqe;
1575         int num = 0;
1576
1577         do {
1578                 eqe = queue_tail_node(&eqo->q);
1579                 if (eqe->evt == 0)
1580                         break;
1581
1582                 rmb();
1583                 eqe->evt = 0;
1584                 num++;
1585                 queue_tail_inc(&eqo->q);
1586         } while (true);
1587
1588         return num;
1589 }
1590
1591 static int event_handle(struct be_eq_obj *eqo)
1592 {
1593         bool rearm = false;
1594         int num = events_get(eqo);
1595
1596         /* Deal with any spurious interrupts that come without events */
1597         if (!num)
1598                 rearm = true;
1599
1600         if (num || msix_enabled(eqo->adapter))
1601                 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1602
1603         if (num)
1604                 napi_schedule(&eqo->napi);
1605
1606         return num;
1607 }
1608
1609 /* Leaves the EQ in a disarmed state */
1610 static void be_eq_clean(struct be_eq_obj *eqo)
1611 {
1612         int num = events_get(eqo);
1613
1614         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1615 }
1616
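/* Teardown-time cleanup: first drain any pending RX completions, then
 * release the buffers that were posted to the RX queue but never consumed.
 */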
1617 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1618 {
1619         struct be_rx_page_info *page_info;
1620         struct be_queue_info *rxq = &rxo->q;
1621         struct be_queue_info *rx_cq = &rxo->cq;
1622         struct be_rx_compl_info *rxcp;
1623         u16 tail;
1624
1625         /* First cleanup pending rx completions */
1626         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1627                 be_rx_compl_discard(rxo, rxcp);
1628                 be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
1629         }
1630
1631         /* Then free posted rx buffers that were not used */
1632         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1633         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1634                 page_info = get_rx_page_info(rxo, tail);
1635                 put_page(page_info->page);
1636                 memset(page_info, 0, sizeof(*page_info));
1637         }
1638         BUG_ON(atomic_read(&rxq->used));
1639         rxq->tail = rxq->head = 0;
1640 }
1641
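/* Drain TX completions on teardown: poll every TX CQ for up to ~200ms, then
 * force-free any skbs whose completions never arrived so no memory is
 * leaked.
 */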
1642 static void be_tx_compl_clean(struct be_adapter *adapter)
1643 {
1644         struct be_tx_obj *txo;
1645         struct be_queue_info *txq;
1646         struct be_eth_tx_compl *txcp;
1647         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1648         struct sk_buff *sent_skb;
1649         bool dummy_wrb;
1650         int i, pending_txqs;
1651
1652         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1653         do {
1654                 pending_txqs = adapter->num_tx_qs;
1655
1656                 for_all_tx_queues(adapter, txo, i) {
1657                         txq = &txo->q;
1658                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1659                                 end_idx =
1660                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1661                                                       wrb_index, txcp);
1662                                 num_wrbs += be_tx_compl_process(adapter, txo,
1663                                                                 end_idx);
1664                                 cmpl++;
1665                         }
1666                         if (cmpl) {
1667                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1668                                 atomic_sub(num_wrbs, &txq->used);
1669                                 cmpl = 0;
1670                                 num_wrbs = 0;
1671                         }
1672                         if (atomic_read(&txq->used) == 0)
1673                                 pending_txqs--;
1674                 }
1675
1676                 if (pending_txqs == 0 || ++timeo > 200)
1677                         break;
1678
1679                 mdelay(1);
1680         } while (true);
1681
1682         for_all_tx_queues(adapter, txo, i) {
1683                 txq = &txo->q;
1684                 if (atomic_read(&txq->used))
1685                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1686                                 atomic_read(&txq->used));
1687
1688                 /* free posted tx for which compls will never arrive */
1689                 while (atomic_read(&txq->used)) {
1690                         sent_skb = txo->sent_skb_list[txq->tail];
1691                         end_idx = txq->tail;
1692                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1693                                                    &dummy_wrb);
1694                         index_adv(&end_idx, num_wrbs - 1, txq->len);
1695                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1696                         atomic_sub(num_wrbs, &txq->used);
1697                 }
1698         }
1699 }
1700
1701 static void be_evt_queues_destroy(struct be_adapter *adapter)
1702 {
1703         struct be_eq_obj *eqo;
1704         int i;
1705
1706         for_all_evt_queues(adapter, eqo, i) {
1707                 be_eq_clean(eqo);
1708                 if (eqo->q.created)
1709                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1710                 be_queue_free(adapter, &eqo->q);
1711         }
1712 }
1713
1714 static int be_evt_queues_create(struct be_adapter *adapter)
1715 {
1716         struct be_queue_info *eq;
1717         struct be_eq_obj *eqo;
1718         int i, rc;
1719
1720         adapter->num_evt_qs = num_irqs(adapter);
1721
1722         for_all_evt_queues(adapter, eqo, i) {
1723                 eqo->adapter = adapter;
1724                 eqo->tx_budget = BE_TX_BUDGET;
1725                 eqo->idx = i;
1726                 eqo->max_eqd = BE_MAX_EQD;
1727                 eqo->enable_aic = true;
1728
1729                 eq = &eqo->q;
1730                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1731                                         sizeof(struct be_eq_entry));
1732                 if (rc)
1733                         return rc;
1734
1735                 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1736                 if (rc)
1737                         return rc;
1738         }
1739         return 0;
1740 }
1741
1742 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1743 {
1744         struct be_queue_info *q;
1745
1746         q = &adapter->mcc_obj.q;
1747         if (q->created)
1748                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1749         be_queue_free(adapter, q);
1750
1751         q = &adapter->mcc_obj.cq;
1752         if (q->created)
1753                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1754         be_queue_free(adapter, q);
1755 }
1756
1757 /* Must be called only after TX qs are created as MCC shares TX EQ */
1758 static int be_mcc_queues_create(struct be_adapter *adapter)
1759 {
1760         struct be_queue_info *q, *cq;
1761
1762         cq = &adapter->mcc_obj.cq;
1763         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1764                         sizeof(struct be_mcc_compl)))
1765                 goto err;
1766
1767         /* Use the default EQ for MCC completions */
1768         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1769                 goto mcc_cq_free;
1770
1771         q = &adapter->mcc_obj.q;
1772         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1773                 goto mcc_cq_destroy;
1774
1775         if (be_cmd_mccq_create(adapter, q, cq))
1776                 goto mcc_q_free;
1777
1778         return 0;
1779
1780 mcc_q_free:
1781         be_queue_free(adapter, q);
1782 mcc_cq_destroy:
1783         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1784 mcc_cq_free:
1785         be_queue_free(adapter, cq);
1786 err:
1787         return -1;
1788 }
1789
1790 static void be_tx_queues_destroy(struct be_adapter *adapter)
1791 {
1792         struct be_queue_info *q;
1793         struct be_tx_obj *txo;
1794         u8 i;
1795
1796         for_all_tx_queues(adapter, txo, i) {
1797                 q = &txo->q;
1798                 if (q->created)
1799                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1800                 be_queue_free(adapter, q);
1801
1802                 q = &txo->cq;
1803                 if (q->created)
1804                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1805                 be_queue_free(adapter, q);
1806         }
1807 }
1808
1809 static int be_num_txqs_want(struct be_adapter *adapter)
1810 {
1811         if (sriov_want(adapter) || be_is_mc(adapter) ||
1812             lancer_chip(adapter) || !be_physfn(adapter) ||
1813             adapter->generation == BE_GEN2)
1814                 return 1;
1815         else
1816                 return MAX_TX_QS;
1817 }
1818
1819 static int be_tx_cqs_create(struct be_adapter *adapter)
1820 {
1821         struct be_queue_info *cq, *eq;
1822         int status;
1823         struct be_tx_obj *txo;
1824         u8 i;
1825
1826         adapter->num_tx_qs = be_num_txqs_want(adapter);
1827         if (adapter->num_tx_qs != MAX_TX_QS) {
1828                 rtnl_lock();
1829                 netif_set_real_num_tx_queues(adapter->netdev,
1830                         adapter->num_tx_qs);
1831                 rtnl_unlock();
1832         }
1833
1834         for_all_tx_queues(adapter, txo, i) {
1835                 cq = &txo->cq;
1836                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1837                                         sizeof(struct be_eth_tx_compl));
1838                 if (status)
1839                         return status;
1840
1841                 /* If num_evt_qs is less than num_tx_qs, then more than
1842                  * one txq share an eq
1843                  */
1844                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1845                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1846                 if (status)
1847                         return status;
1848         }
1849         return 0;
1850 }
1851
1852 static int be_tx_qs_create(struct be_adapter *adapter)
1853 {
1854         struct be_tx_obj *txo;
1855         int i, status;
1856
1857         for_all_tx_queues(adapter, txo, i) {
1858                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1859                                         sizeof(struct be_eth_wrb));
1860                 if (status)
1861                         return status;
1862
1863                 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1864                 if (status)
1865                         return status;
1866         }
1867
1868         return 0;
1869 }
1870
1871 static void be_rx_cqs_destroy(struct be_adapter *adapter)
1872 {
1873         struct be_queue_info *q;
1874         struct be_rx_obj *rxo;
1875         int i;
1876
1877         for_all_rx_queues(adapter, rxo, i) {
1878                 q = &rxo->cq;
1879                 if (q->created)
1880                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1881                 be_queue_free(adapter, q);
1882         }
1883 }
1884
1885 static int be_rx_cqs_create(struct be_adapter *adapter)
1886 {
1887         struct be_queue_info *eq, *cq;
1888         struct be_rx_obj *rxo;
1889         int rc, i;
1890
1891         /* We'll create as many RSS rings as there are irqs.
1892          * But when there's only one irq there's no use creating RSS rings
1893          */
1894         adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1895                                 num_irqs(adapter) + 1 : 1;
1896         if (adapter->num_rx_qs != MAX_RX_QS) {
1897                 rtnl_lock();
1898                 netif_set_real_num_rx_queues(adapter->netdev,
1899                                              adapter->num_rx_qs);
1900                 rtnl_unlock();
1901         }
1902
1903         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1904         for_all_rx_queues(adapter, rxo, i) {
1905                 rxo->adapter = adapter;
1906                 cq = &rxo->cq;
1907                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1908                                 sizeof(struct be_eth_rx_compl));
1909                 if (rc)
1910                         return rc;
1911
1912                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1913                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
1914                 if (rc)
1915                         return rc;
1916         }
1917
1918         if (adapter->num_rx_qs != MAX_RX_QS)
1919                 dev_info(&adapter->pdev->dev,
1920                         "Created only %d receive queues\n", adapter->num_rx_qs);
1921
1922         return 0;
1923 }
1924
1925 static irqreturn_t be_intx(int irq, void *dev)
1926 {
1927         struct be_adapter *adapter = dev;
1928         int num_evts;
1929
1930         /* With INTx only one EQ is used */
1931         num_evts = event_handle(&adapter->eq_obj[0]);
1932         if (num_evts)
1933                 return IRQ_HANDLED;
1934         else
1935                 return IRQ_NONE;
1936 }
1937
1938 static irqreturn_t be_msix(int irq, void *dev)
1939 {
1940         struct be_eq_obj *eqo = dev;
1941
1942         event_handle(eqo);
1943         return IRQ_HANDLED;
1944 }
1945
1946 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1947 {
1948         return rxcp->tcpf && !rxcp->err;
1949 }
1950
1951 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
1952                         int budget)
1953 {
1954         struct be_adapter *adapter = rxo->adapter;
1955         struct be_queue_info *rx_cq = &rxo->cq;
1956         struct be_rx_compl_info *rxcp;
1957         u32 work_done;
1958
1959         for (work_done = 0; work_done < budget; work_done++) {
1960                 rxcp = be_rx_compl_get(rxo);
1961                 if (!rxcp)
1962                         break;
1963
1964                 /* Is it a flush compl that has no data */
1965                 if (unlikely(rxcp->num_rcvd == 0))
1966                         goto loop_continue;
1967
1968                 /* Discard compl with partial DMA Lancer B0 */
1969                 if (unlikely(!rxcp->pkt_size)) {
1970                         be_rx_compl_discard(rxo, rxcp);
1971                         goto loop_continue;
1972                 }
1973
1974                 /* On BE drop pkts that arrive due to imperfect filtering in
1975                  * promiscuous mode on some SKUs
1976                  */
1977                 if (unlikely(rxcp->port != adapter->port_num &&
1978                                 !lancer_chip(adapter))) {
1979                         be_rx_compl_discard(rxo, rxcp);
1980                         goto loop_continue;
1981                 }
1982
1983                 if (do_gro(rxcp))
1984                         be_rx_compl_process_gro(rxo, napi, rxcp);
1985                 else
1986                         be_rx_compl_process(rxo, rxcp);
1987 loop_continue:
1988                 be_rx_stats_update(rxo, rxcp);
1989         }
1990
1991         if (work_done) {
1992                 be_cq_notify(adapter, rx_cq->id, true, work_done);
1993
1994                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1995                         be_post_rx_frags(rxo, GFP_ATOMIC);
1996         }
1997
1998         return work_done;
1999 }
2000
2001 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2002                           int budget, int idx)
2003 {
2004         struct be_eth_tx_compl *txcp;
2005         int num_wrbs = 0, work_done;
2006
2007         for (work_done = 0; work_done < budget; work_done++) {
2008                 txcp = be_tx_compl_get(&txo->cq);
2009                 if (!txcp)
2010                         break;
2011                 num_wrbs += be_tx_compl_process(adapter, txo,
2012                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2013                                         wrb_index, txcp));
2014         }
2015
2016         if (work_done) {
2017                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2018                 atomic_sub(num_wrbs, &txo->q.used);
2019
2020                 /* As Tx wrbs have been freed up, wake up netdev queue
2021                  * if it was stopped due to lack of tx wrbs.  */
2022                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2023                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2024                         netif_wake_subqueue(adapter->netdev, idx);
2025                 }
2026
2027                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2028                 tx_stats(txo)->tx_compl += work_done;
2029                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2030         }
2031         return (work_done < budget); /* Done */
2032 }
2033
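/* NAPI handler: there is one napi context per event queue. Each invocation
 * services the TX and RX queues mapped to this EQ and, for the EQ that owns
 * the MCC queue, the async MCC completions as well. Returning less than
 * budget lets NAPI complete and re-arms the EQ.
 */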
2034 int be_poll(struct napi_struct *napi, int budget)
2035 {
2036         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2037         struct be_adapter *adapter = eqo->adapter;
2038         int max_work = 0, work, i;
2039         bool tx_done;
2040
2041         /* Process all TXQs serviced by this EQ */
2042         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2043                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2044                                         eqo->tx_budget, i);
2045                 if (!tx_done)
2046                         max_work = budget;
2047         }
2048
2049         /* This loop will iterate twice for EQ0 in which
2050          * completions of the last RXQ (default one) are also processed.
2051          * For other EQs the loop iterates only once.
2052          */
2053         for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2054                 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2055                 max_work = max(work, max_work);
2056         }
2057
2058         if (is_mcc_eqo(eqo))
2059                 be_process_mcc(adapter);
2060
2061         if (max_work < budget) {
2062                 napi_complete(napi);
2063                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2064         } else {
2065                 /* As we'll continue in polling mode, count and clear events */
2066                 be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
2067         }
2068         return max_work;
2069 }
2070
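/* Detect an unrecoverable error (UE): on Lancer via the SLIPORT status
 * registers, on BE via the PCI-config UE status words (masked bits are
 * ignored), and log which functional block tripped.
 */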
2071 void be_detect_dump_ue(struct be_adapter *adapter)
2072 {
2073         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2074         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2075         u32 i;
2076
2077         if (adapter->eeh_err || adapter->ue_detected)
2078                 return;
2079
2080         if (lancer_chip(adapter)) {
2081                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2082                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2083                         sliport_err1 = ioread32(adapter->db +
2084                                         SLIPORT_ERROR1_OFFSET);
2085                         sliport_err2 = ioread32(adapter->db +
2086                                         SLIPORT_ERROR2_OFFSET);
2087                 }
2088         } else {
2089                 pci_read_config_dword(adapter->pdev,
2090                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2091                 pci_read_config_dword(adapter->pdev,
2092                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2093                 pci_read_config_dword(adapter->pdev,
2094                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2095                 pci_read_config_dword(adapter->pdev,
2096                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2097
2098                 ue_lo = (ue_lo & (~ue_lo_mask));
2099                 ue_hi = (ue_hi & (~ue_hi_mask));
2100         }
2101
2102         if (ue_lo || ue_hi ||
2103                 sliport_status & SLIPORT_STATUS_ERR_MASK) {
2104                 adapter->ue_detected = true;
2105                 adapter->eeh_err = true;
2106                 dev_err(&adapter->pdev->dev,
2107                         "Unrecoverable error in the card\n");
2108         }
2109
2110         if (ue_lo) {
2111                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2112                         if (ue_lo & 1)
2113                                 dev_err(&adapter->pdev->dev,
2114                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2115                 }
2116         }
2117         if (ue_hi) {
2118                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2119                         if (ue_hi & 1)
2120                                 dev_err(&adapter->pdev->dev,
2121                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2122                 }
2123         }
2124
2125         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2126                 dev_err(&adapter->pdev->dev,
2127                         "sliport status 0x%x\n", sliport_status);
2128                 dev_err(&adapter->pdev->dev,
2129                         "sliport error1 0x%x\n", sliport_err1);
2130                 dev_err(&adapter->pdev->dev,
2131                         "sliport error2 0x%x\n", sliport_err2);
2132         }
2133 }
2134
2135 static void be_msix_disable(struct be_adapter *adapter)
2136 {
2137         if (msix_enabled(adapter)) {
2138                 pci_disable_msix(adapter->pdev);
2139                 adapter->num_msix_vec = 0;
2140         }
2141 }
2142
2143 static uint be_num_rss_want(struct be_adapter *adapter)
2144 {
2145         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2146              !sriov_want(adapter) && be_physfn(adapter) &&
2147              !be_is_mc(adapter))
2148                 return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2149         else
2150                 return 0;
2151 }
2152
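/* Pick an MSI-X vector count: the number of RSS rings wanted, capped by the
 * online CPU count, plus RoCE vectors when the device supports RoCE, with a
 * floor of BE_MIN_MSIX_VECTORS. pci_enable_msix() may succeed with fewer
 * vectors than requested, in which case the request is retried once with
 * the count the PCI core reported as available.
 */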
2153 static void be_msix_enable(struct be_adapter *adapter)
2154 {
2155 #define BE_MIN_MSIX_VECTORS             1
2156         int i, status, num_vec, num_roce_vec = 0;
2157
2158         /* If RSS queues are not used, need a vec for default RX Q */
2159         num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2160         if (be_roce_supported(adapter)) {
2161                 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2162                                         (num_online_cpus() + 1));
2163                 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2164                 num_vec += num_roce_vec;
2165                 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2166         }
2167         num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2168
2169         for (i = 0; i < num_vec; i++)
2170                 adapter->msix_entries[i].entry = i;
2171
2172         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2173         if (status == 0) {
2174                 goto done;
2175         } else if (status >= BE_MIN_MSIX_VECTORS) {
2176                 num_vec = status;
2177                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2178                                 num_vec) == 0)
2179                         goto done;
2180         }
2181         return;
2182 done:
2183         if (be_roce_supported(adapter)) {
2184                 if (num_vec > num_roce_vec) {
2185                         adapter->num_msix_vec = num_vec - num_roce_vec;
2186                         adapter->num_msix_roce_vec =
2187                                 num_vec - adapter->num_msix_vec;
2188                 } else {
2189                         adapter->num_msix_vec = num_vec;
2190                         adapter->num_msix_roce_vec = 0;
2191                 }
2192         } else
2193                 adapter->num_msix_vec = num_vec;
2194         return;
2195 }
2196
2197 static inline int be_msix_vec_get(struct be_adapter *adapter,
2198                                 struct be_eq_obj *eqo)
2199 {
2200         return adapter->msix_entries[eqo->idx].vector;
2201 }
2202
2203 static int be_msix_register(struct be_adapter *adapter)
2204 {
2205         struct net_device *netdev = adapter->netdev;
2206         struct be_eq_obj *eqo;
2207         int status, i, vec;
2208
2209         for_all_evt_queues(adapter, eqo, i) {
2210                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2211                 vec = be_msix_vec_get(adapter, eqo);
2212                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2213                 if (status)
2214                         goto err_msix;
2215         }
2216
2217         return 0;
2218 err_msix:
2219         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2220                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2221         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2222                 status);
2223         be_msix_disable(adapter);
2224         return status;
2225 }
2226
2227 static int be_irq_register(struct be_adapter *adapter)
2228 {
2229         struct net_device *netdev = adapter->netdev;
2230         int status;
2231
2232         if (msix_enabled(adapter)) {
2233                 status = be_msix_register(adapter);
2234                 if (status == 0)
2235                         goto done;
2236                 /* INTx is not supported for VF */
2237                 if (!be_physfn(adapter))
2238                         return status;
2239         }
2240
2241         /* INTx */
2242         netdev->irq = adapter->pdev->irq;
2243         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2244                         adapter);
2245         if (status) {
2246                 dev_err(&adapter->pdev->dev,
2247                         "INTx request IRQ failed - err %d\n", status);
2248                 return status;
2249         }
2250 done:
2251         adapter->isr_registered = true;
2252         return 0;
2253 }
2254
2255 static void be_irq_unregister(struct be_adapter *adapter)
2256 {
2257         struct net_device *netdev = adapter->netdev;
2258         struct be_eq_obj *eqo;
2259         int i;
2260
2261         if (!adapter->isr_registered)
2262                 return;
2263
2264         /* INTx */
2265         if (!msix_enabled(adapter)) {
2266                 free_irq(netdev->irq, adapter);
2267                 goto done;
2268         }
2269
2270         /* MSIx */
2271         for_all_evt_queues(adapter, eqo, i)
2272                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2273
2274 done:
2275         adapter->isr_registered = false;
2276 }
2277
2278 static void be_rx_qs_destroy(struct be_adapter *adapter)
2279 {
2280         struct be_queue_info *q;
2281         struct be_rx_obj *rxo;
2282         int i;
2283
2284         for_all_rx_queues(adapter, rxo, i) {
2285                 q = &rxo->q;
2286                 if (q->created) {
2287                         be_cmd_rxq_destroy(adapter, q);
2288                         /* After the rxq is invalidated, wait for a grace time
2289                          * of 1ms for all dma to end and the flush compl to
2290                          * arrive
2291                          */
2292                         mdelay(1);
2293                         be_rx_cq_clean(rxo);
2294                 }
2295                 be_queue_free(adapter, q);
2296         }
2297 }
2298
2299 static int be_close(struct net_device *netdev)
2300 {
2301         struct be_adapter *adapter = netdev_priv(netdev);
2302         struct be_eq_obj *eqo;
2303         int i;
2304
2305         be_roce_dev_close(adapter);
2306
2307         be_async_mcc_disable(adapter);
2308
2309         if (!lancer_chip(adapter))
2310                 be_intr_set(adapter, false);
2311
2312         for_all_evt_queues(adapter, eqo, i) {
2313                 napi_disable(&eqo->napi);
2314                 if (msix_enabled(adapter))
2315                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2316                 else
2317                         synchronize_irq(netdev->irq);
2318                 be_eq_clean(eqo);
2319         }
2320
2321         be_irq_unregister(adapter);
2322
2323         /* Wait for all pending tx completions to arrive so that
2324          * all tx skbs are freed.
2325          */
2326         be_tx_compl_clean(adapter);
2327
2328         be_rx_qs_destroy(adapter);
2329         return 0;
2330 }
2331
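/* Create the RX queues: the default (non-RSS) RXQ first, as the FW expects,
 * then the RSS rings. When more than one RX queue exists, the 128-entry RSS
 * indirection table is filled round-robin with the RSS rings' ids; e.g.
 * (illustrative) with three RSS rings the table reads r1 r2 r3 r1 r2 r3 ...
 * The default queue deliberately takes no RSS traffic.
 */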
2332 static int be_rx_qs_create(struct be_adapter *adapter)
2333 {
2334         struct be_rx_obj *rxo;
2335         int rc, i, j;
2336         u8 rsstable[128];
2337
2338         for_all_rx_queues(adapter, rxo, i) {
2339                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2340                                     sizeof(struct be_eth_rx_d));
2341                 if (rc)
2342                         return rc;
2343         }
2344
2345         /* The FW would like the default RXQ to be created first */
2346         rxo = default_rxo(adapter);
2347         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2348                                adapter->if_handle, false, &rxo->rss_id);
2349         if (rc)
2350                 return rc;
2351
2352         for_all_rss_queues(adapter, rxo, i) {
2353                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2354                                        rx_frag_size, adapter->if_handle,
2355                                        true, &rxo->rss_id);
2356                 if (rc)
2357                         return rc;
2358         }
2359
2360         if (be_multi_rxq(adapter)) {
2361                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2362                         for_all_rss_queues(adapter, rxo, i) {
2363                                 if ((j + i) >= 128)
2364                                         break;
2365                                 rsstable[j + i] = rxo->rss_id;
2366                         }
2367                 }
2368                 rc = be_cmd_rss_config(adapter, rsstable, 128);
2369                 if (rc)
2370                         return rc;
2371         }
2372
2373         /* First time posting */
2374         for_all_rx_queues(adapter, rxo, i)
2375                 be_post_rx_frags(rxo, GFP_KERNEL);
2376         return 0;
2377 }
2378
2379 static int be_open(struct net_device *netdev)
2380 {
2381         struct be_adapter *adapter = netdev_priv(netdev);
2382         struct be_eq_obj *eqo;
2383         struct be_rx_obj *rxo;
2384         struct be_tx_obj *txo;
2385         u8 link_status;
2386         int status, i;
2387
2388         status = be_rx_qs_create(adapter);
2389         if (status)
2390                 goto err;
2391
2392         be_irq_register(adapter);
2393
2394         if (!lancer_chip(adapter))
2395                 be_intr_set(adapter, true);
2396
2397         for_all_rx_queues(adapter, rxo, i)
2398                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2399
2400         for_all_tx_queues(adapter, txo, i)
2401                 be_cq_notify(adapter, txo->cq.id, true, 0);
2402
2403         be_async_mcc_enable(adapter);
2404
2405         for_all_evt_queues(adapter, eqo, i) {
2406                 napi_enable(&eqo->napi);
2407                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2408         }
2409
2410         status = be_cmd_link_status_query(adapter, NULL, NULL,
2411                                           &link_status, 0);
2412         if (!status)
2413                 be_link_status_update(adapter, link_status);
2414
2415         be_roce_dev_open(adapter);
2416         return 0;
2417 err:
2418         be_close(adapter->netdev);
2419         return -EIO;
2420 }
2421
2422 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2423 {
2424         struct be_dma_mem cmd;
2425         int status = 0;
2426         u8 mac[ETH_ALEN];
2427
2428         memset(mac, 0, ETH_ALEN);
2429
2430         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2431         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2432                                     GFP_KERNEL);
2433         if (cmd.va == NULL)
2434                 return -1;
2435         memset(cmd.va, 0, cmd.size);
2436
2437         if (enable) {
2438                 status = pci_write_config_dword(adapter->pdev,
2439                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2440                 if (status) {
2441                         dev_err(&adapter->pdev->dev,
2442                                 "Could not enable Wake-on-LAN\n");
2443                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2444                                           cmd.dma);
2445                         return status;
2446                 }
2447                 status = be_cmd_enable_magic_wol(adapter,
2448                                 adapter->netdev->dev_addr, &cmd);
2449                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2450                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2451         } else {
2452                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2453                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2454                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2455         }
2456
2457         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2458         return status;
2459 }
2460
2461 /*
2462  * Generate a seed MAC address from the PF MAC Address using jhash.
2463  * MAC addresses for VFs are assigned incrementally, starting from the seed.
2464  * These addresses are programmed into the ASIC by the PF, and the VF driver
2465  * queries for its MAC address during probe.
2466  */
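/* For example (illustrative values): if the jhash-derived seed came out as
 * 02:aa:bb:cc:dd:00, then VF0 is given the seed itself, VF1 gets
 * 02:aa:bb:cc:dd:01, VF2 gets 02:aa:bb:cc:dd:02, and so on; only the last
 * octet (mac[5]) is incremented per VF.
 */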
2467 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2468 {
2469         u32 vf;
2470         int status = 0;
2471         u8 mac[ETH_ALEN];
2472         struct be_vf_cfg *vf_cfg;
2473
2474         be_vf_eth_addr_generate(adapter, mac);
2475
2476         for_all_vfs(adapter, vf_cfg, vf) {
2477                 if (lancer_chip(adapter)) {
2478                         status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
2479                 } else {
2480                         status = be_cmd_pmac_add(adapter, mac,
2481                                                  vf_cfg->if_handle,
2482                                                  &vf_cfg->pmac_id, vf + 1);
2483                 }
2484
2485                 if (status)
2486                         dev_err(&adapter->pdev->dev,
2487                         "MAC address assignment failed for VF %d\n", vf);
2488                 else
2489                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2490
2491                 mac[5] += 1;
2492         }
2493         return status;
2494 }
2495
2496 static void be_vf_clear(struct be_adapter *adapter)
2497 {
2498         struct be_vf_cfg *vf_cfg;
2499         u32 vf;
2500
2501         if (be_find_vfs(adapter, ASSIGNED)) {
2502                 dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
2503                 goto done;
2504         }
2505
2506         for_all_vfs(adapter, vf_cfg, vf) {
2507                 if (lancer_chip(adapter))
2508                         be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2509                 else
2510                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2511                                         vf_cfg->pmac_id, vf + 1);
2512
2513                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2514         }
2515         pci_disable_sriov(adapter->pdev);
2516 done:
2517         kfree(adapter->vf_cfg);
2518         adapter->num_vfs = 0;
2519 }
2520
2521 static int be_clear(struct be_adapter *adapter)
2522 {
2523         int i = 1;
2524
2525         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2526                 cancel_delayed_work_sync(&adapter->work);
2527                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2528         }
2529
2530         if (sriov_enabled(adapter))
2531                 be_vf_clear(adapter);
2532
2533         for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2534                 be_cmd_pmac_del(adapter, adapter->if_handle,
2535                         adapter->pmac_id[i], 0);
2536
2537         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2538
2539         be_mcc_queues_destroy(adapter);
2540         be_rx_cqs_destroy(adapter);
2541         be_tx_queues_destroy(adapter);
2542         be_evt_queues_destroy(adapter);
2543
2544         /* tell fw we're done with firing cmds */
2545         be_cmd_fw_clean(adapter);
2546
2547         be_msix_disable(adapter);
2548         return 0;
2549 }
2550
2551 static int be_vf_setup_init(struct be_adapter *adapter)
2552 {
2553         struct be_vf_cfg *vf_cfg;
2554         int vf;
2555
2556         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2557                                   GFP_KERNEL);
2558         if (!adapter->vf_cfg)
2559                 return -ENOMEM;
2560
2561         for_all_vfs(adapter, vf_cfg, vf) {
2562                 vf_cfg->if_handle = -1;
2563                 vf_cfg->pmac_id = -1;
2564         }
2565         return 0;
2566 }
2567
2568 static int be_vf_setup(struct be_adapter *adapter)
2569 {
2570         struct be_vf_cfg *vf_cfg;
2571         struct device *dev = &adapter->pdev->dev;
2572         u32 cap_flags, en_flags, vf;
2573         u16 def_vlan, lnk_speed;
2574         int status, enabled_vfs;
2575
2576         enabled_vfs = be_find_vfs(adapter, ENABLED);
2577         if (enabled_vfs) {
2578                 dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
2579                 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2580                 return 0;
2581         }
2582
2583         if (num_vfs > adapter->dev_num_vfs) {
2584                 dev_warn(dev, "Device supports %d VFs and not %d\n",
2585                          adapter->dev_num_vfs, num_vfs);
2586                 num_vfs = adapter->dev_num_vfs;
2587         }
2588
2589         status = pci_enable_sriov(adapter->pdev, num_vfs);
2590         if (!status) {
2591                 adapter->num_vfs = num_vfs;
2592         } else {
2593                 /* Platform doesn't support SRIOV though device supports it */
2594                 dev_warn(dev, "SRIOV enable failed\n");
2595                 return 0;
2596         }
2597
2598         status = be_vf_setup_init(adapter);
2599         if (status)
2600                 goto err;
2601
2602         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2603                                 BE_IF_FLAGS_MULTICAST;
2604         for_all_vfs(adapter, vf_cfg, vf) {
2605                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2606                                           &vf_cfg->if_handle, vf + 1);
2607                 if (status)
2608                         goto err;
2609         }
2610
2611         if (!enabled_vfs) {
2612                 status = be_vf_eth_addr_config(adapter);
2613                 if (status)
2614                         goto err;
2615         }
2616
2617         for_all_vfs(adapter, vf_cfg, vf) {
2618                 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
2619                                                   NULL, vf + 1);
2620                 if (status)
2621                         goto err;
2622                 vf_cfg->tx_rate = lnk_speed * 10;
2623
2624                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2625                                 vf + 1, vf_cfg->if_handle);
2626                 if (status)
2627                         goto err;
2628                 vf_cfg->def_vid = def_vlan;
2629         }
2630         return 0;
2631 err:
2632         return status;
2633 }
2634
2635 static void be_setup_init(struct be_adapter *adapter)
2636 {
2637         adapter->vlan_prio_bmap = 0xff;
2638         adapter->phy.link_speed = -1;
2639         adapter->if_handle = -1;
2640         adapter->be3_native = false;
2641         adapter->promiscuous = false;
2642         adapter->eq_next_idx = 0;
2643         adapter->phy.forced_port_speed = -1;
2644 }
2645
2646 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2647                            bool *active_mac, u32 *pmac_id)
2648 {
2649         int status = 0;
2650
2651         if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2652                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2653                 if (!lancer_chip(adapter) && !be_physfn(adapter))
2654                         *active_mac = true;
2655                 else
2656                         *active_mac = false;
2657
2658                 return status;
2659         }
2660
2661         if (lancer_chip(adapter)) {
2662                 status = be_cmd_get_mac_from_list(adapter, mac,
2663                                                   active_mac, pmac_id, 0);
2664                 if (*active_mac) {
2665                         status = be_cmd_mac_addr_query(adapter, mac,
2666                                                        MAC_ADDRESS_TYPE_NETWORK,
2667                                                        false, if_handle,
2668                                                        *pmac_id);
2669                 }
2670         } else if (be_physfn(adapter)) {
2671                 /* For BE3, for PF get permanent MAC */
2672                 status = be_cmd_mac_addr_query(adapter, mac,
2673                                                MAC_ADDRESS_TYPE_NETWORK, true,
2674                                                0, 0);
2675                 *active_mac = false;
2676         } else {
2677                 /* For BE3, for VF get soft MAC assigned by PF */
2678                 status = be_cmd_mac_addr_query(adapter, mac,
2679                                                MAC_ADDRESS_TYPE_NETWORK, false,
2680                                                if_handle, 0);
2681                 *active_mac = true;
2682         }
2683         return status;
2684 }
2685
2686 /* Routine to query per function resource limits */
2687 static int be_get_config(struct be_adapter *adapter)
2688 {
2689         int pos;
2690         u16 dev_num_vfs;
2691
2692         pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2693         if (pos) {
2694                 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2695                                      &dev_num_vfs);
2696                 adapter->dev_num_vfs = dev_num_vfs;
2697         }
2698         return 0;
2699 }
2700
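/* Bring the function to an operational state: create the EQs, CQs and
 * TX/RX/MCC queues, create the interface and program a MAC address, then
 * optionally enable SR-IOV and kick off the periodic worker.
 */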
2701 static int be_setup(struct be_adapter *adapter)
2702 {
2703         struct device *dev = &adapter->pdev->dev;
2704         u32 cap_flags, en_flags;
2705         u32 tx_fc, rx_fc;
2706         int status;
2707         u8 mac[ETH_ALEN];
2708         bool active_mac;
2709
2710         be_setup_init(adapter);
2711
2712         be_get_config(adapter);
2713
2714         be_cmd_req_native_mode(adapter);
2715
2716         be_msix_enable(adapter);
2717
2718         status = be_evt_queues_create(adapter);
2719         if (status)
2720                 goto err;
2721
2722         status = be_tx_cqs_create(adapter);
2723         if (status)
2724                 goto err;
2725
2726         status = be_rx_cqs_create(adapter);
2727         if (status)
2728                 goto err;
2729
2730         status = be_mcc_queues_create(adapter);
2731         if (status)
2732                 goto err;
2733
2734         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2735                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2736         cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2737                         BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2738
2739         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2740                 cap_flags |= BE_IF_FLAGS_RSS;
2741                 en_flags |= BE_IF_FLAGS_RSS;
2742         }
2743
2744         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2745                                   &adapter->if_handle, 0);
2746         if (status != 0)
2747                 goto err;
2748
2749         memset(mac, 0, ETH_ALEN);
2750         active_mac = false;
2751         status = be_get_mac_addr(adapter, mac, adapter->if_handle,
2752                                  &active_mac, &adapter->pmac_id[0]);
2753         if (status != 0)
2754                 goto err;
2755
2756         if (!active_mac) {
2757                 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2758                                          &adapter->pmac_id[0], 0);
2759                 if (status != 0)
2760                         goto err;
2761         }
2762
2763         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
2764                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2765                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2766         }
2767
2768         status = be_tx_qs_create(adapter);
2769         if (status)
2770                 goto err;
2771
2772         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2773
2774         if (adapter->vlans_added)
2775                 be_vid_config(adapter);
2776
2777         be_set_rx_mode(adapter->netdev);
2778
2779         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2780
2781         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
2782                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
2783                                         adapter->rx_fc);
2784
2785         if (be_physfn(adapter) && num_vfs) {
2786                 if (adapter->dev_num_vfs)
2787                         be_vf_setup(adapter);
2788                 else
2789                         dev_warn(dev, "device doesn't support SRIOV\n");
2790         }
2791
2792         be_cmd_get_phy_info(adapter);
2793         if (be_pause_supported(adapter))
2794                 adapter->phy.fc_autoneg = 1;
2795
2796         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2797         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2798         return 0;
2799 err:
2800         be_clear(adapter);
2801         return status;
2802 }
2803
2804 #ifdef CONFIG_NET_POLL_CONTROLLER
2805 static void be_netpoll(struct net_device *netdev)
2806 {
2807         struct be_adapter *adapter = netdev_priv(netdev);
2808         struct be_eq_obj *eqo;
2809         int i;
2810
2811         for_all_evt_queues(adapter, eqo, i)
2812                 event_handle(eqo);
2813
2814         return;
2815 }
2816 #endif
2817
2818 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2819 static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2820
2821 static bool be_flash_redboot(struct be_adapter *adapter,
2822                         const u8 *p, u32 img_start, int image_size,
2823                         int hdr_size)
2824 {
2825         u32 crc_offset;
2826         u8 flashed_crc[4];
2827         int status;
2828
2829         crc_offset = hdr_size + img_start + image_size - 4;
2830
2831         p += crc_offset;
2832
2833         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2834                         (image_size - 4));
2835         if (status) {
2836                 dev_err(&adapter->pdev->dev,
2837                 "could not get crc from flash, not flashing redboot\n");
2838                 return false;
2839         }
2840
2841         /* Update redboot only if the CRC does not match */
2842         if (!memcmp(flashed_crc, p, 4))
2843                 return false;
2844         else
2845                 return true;
2846 }
2847
2848 static bool phy_flashing_required(struct be_adapter *adapter)
2849 {
2850         return (adapter->phy.phy_type == TN_8022 &&
2851                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
2852 }
2853
2854 static bool is_comp_in_ufi(struct be_adapter *adapter,
2855                            struct flash_section_info *fsec, int type)
2856 {
2857         int i = 0, img_type = 0;
2858         struct flash_section_info_g2 *fsec_g2 = NULL;
2859
2860         if (adapter->generation != BE_GEN3)
2861                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
2862
2863         for (i = 0; i < MAX_FLASH_COMP; i++) {
2864                 if (fsec_g2)
2865                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2866                 else
2867                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2868
2869                 if (img_type == type)
2870                         return true;
2871         }
2872         return false;
2874 }
2875
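/* Walk the UFI image past the file header, scanning in 32-byte steps for
 * the flash-section cookie that marks the flash directory; returns NULL if
 * no section header is found before the end of the image.
 */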
2876 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2877                                                 int header_size,
2878                                                 const struct firmware *fw)
2879 {
2880         struct flash_section_info *fsec = NULL;
2881         const u8 *p = fw->data;
2882
2883         p += header_size;
2884         while (p < (fw->data + fw->size)) {
2885                 fsec = (struct flash_section_info *)p;
2886                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2887                         return fsec;
2888                 p += 32;
2889         }
2890         return NULL;
2891 }
2892
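/* Walk the per-generation component table and flash every component that is
 * present in the UFI and applicable to this adapter. Each component is
 * written in 32KB chunks: intermediate chunks use a SAVE op and the final
 * chunk a FLASH op, which commits the image.
 */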
2893 static int be_flash_data(struct be_adapter *adapter,
2894                          const struct firmware *fw,
2895                          struct be_dma_mem *flash_cmd,
2896                          int num_of_images)
2897 {
2899         int status = 0, i, filehdr_size = 0;
2900         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
2901         u32 total_bytes = 0, flash_op;
2902         int num_bytes;
2903         const u8 *p = fw->data;
2904         struct be_cmd_write_flashrom *req = flash_cmd->va;
2905         const struct flash_comp *pflashcomp;
2906         int num_comp;
2907         struct flash_section_info *fsec = NULL;
2908
2909         static const struct flash_comp gen3_flash_types[] = {
2910                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
2911                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
2912                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
2913                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
2914                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
2915                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
2916                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
2917                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
2918                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
2919                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
2920                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
2921                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
2922                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
2923                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
2924                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
2925                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
2926                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
2927                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
2928                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
2929                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
2930         };
2931
2932         static const struct flash_comp gen2_flash_types[] = {
2933                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
2934                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
2935                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
2936                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
2937                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
2938                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
2939                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
2940                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
2941                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
2942                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
2943                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
2944                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
2945                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
2946                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
2947                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
2948                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
2949         };
2950
2951         if (adapter->generation == BE_GEN3) {
2952                 pflashcomp = gen3_flash_types;
2953                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2954                 num_comp = ARRAY_SIZE(gen3_flash_types);
2955         } else {
2956                 pflashcomp = gen2_flash_types;
2957                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2958                 num_comp = ARRAY_SIZE(gen2_flash_types);
2959         }
2960         /* Get flash section info */
2961         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
2962         if (!fsec) {
2963                 dev_err(&adapter->pdev->dev,
2964                         "Invalid cookie. UFI image may be corrupted\n");
2965                 return -EINVAL;
2966         }
2967         for (i = 0; i < num_comp; i++) {
2968                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
2969                         continue;
2970
2971                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
2972                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2973                         continue;
2974
2975                 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
2976                     !phy_flashing_required(adapter))
2977                         continue;
2978
2979                 if (pflashcomp[i].optype == OPTYPE_REDBOOT &&
2980                     !be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
2981                                       pflashcomp[i].size,
2982                                       filehdr_size + img_hdrs_size))
2983                         continue;
2987
2988                 /* Flash the component */
2989                 p = fw->data;
2990                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
2991                 if (p + pflashcomp[i].size > fw->data + fw->size)
2992                         return -EINVAL;
2993                 total_bytes = pflashcomp[i].size;
2994                 while (total_bytes) {
2995                         num_bytes = min_t(u32, total_bytes, 32 * 1024);
2999                         total_bytes -= num_bytes;
3000                         if (!total_bytes) {
3001                                 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
3002                                         flash_op = FLASHROM_OPER_PHY_FLASH;
3003                                 else
3004                                         flash_op = FLASHROM_OPER_FLASH;
3005                         } else {
3006                                 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
3007                                         flash_op = FLASHROM_OPER_PHY_SAVE;
3008                                 else
3009                                         flash_op = FLASHROM_OPER_SAVE;
3010                         }
3011                         memcpy(req->params.data_buf, p, num_bytes);
3012                         p += num_bytes;
3013                         status = be_cmd_write_flashrom(adapter, flash_cmd,
3014                                 pflashcomp[i].optype, flash_op, num_bytes);
3015                         if (status) {
3016                                 if (status == ILLEGAL_IOCTL_REQ &&
3017                                     pflashcomp[i].optype == OPTYPE_PHY_FW)
3018                                         break;
3019                                 dev_err(&adapter->pdev->dev,
3020                                         "cmd to write to flash rom failed\n");
3021                                 return -EIO;
3022                         }
3024                 }
3025         }
3026         return 0;
3027 }
3028
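/* Infer which adapter generation a UFI file targets from the first character
 * of its build string.
 */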
3029 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
3030 {
3031         if (fhdr == NULL)
3032                 return 0;
3033         if (fhdr->build[0] == '3')
3034                 return BE_GEN3;
3035         else if (fhdr->build[0] == '2')
3036                 return BE_GEN2;
3037         else
3038                 return 0;
3039 }
3040
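/* Download firmware to a Lancer chip: the image, whose length must be 4-byte
 * aligned, is written to the "/prg" object in 32KB chunks and then committed
 * with a zero-length write.
 */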
3041 static int lancer_fw_download(struct be_adapter *adapter,
3042                                 const struct firmware *fw)
3043 {
3044 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3045 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3046         struct be_dma_mem flash_cmd;
3047         const u8 *data_ptr = NULL;
3048         u8 *dest_image_ptr = NULL;
3049         size_t image_size = 0;
3050         u32 chunk_size = 0;
3051         u32 data_written = 0;
3052         u32 offset = 0;
3053         int status = 0;
3054         u8 add_status = 0;
3055
3056         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3057                 dev_err(&adapter->pdev->dev,
3058                         "FW image not properly aligned; length must be 4-byte aligned\n");
3060                 status = -EINVAL;
3061                 goto lancer_fw_exit;
3062         }
3063
3064         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3065                                 + LANCER_FW_DOWNLOAD_CHUNK;
3066         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3067                                                 &flash_cmd.dma, GFP_KERNEL);
3068         if (!flash_cmd.va) {
3069                 status = -ENOMEM;
3070                 dev_err(&adapter->pdev->dev,
3071                         "Memory allocation failure while flashing\n");
3072                 goto lancer_fw_exit;
3073         }
3074
3075         dest_image_ptr = flash_cmd.va +
3076                                 sizeof(struct lancer_cmd_req_write_object);
3077         image_size = fw->size;
3078         data_ptr = fw->data;
3079
3080         while (image_size) {
3081                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3082
3083                 /* Copy the image chunk content. */
3084                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3085
3086                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3087                                 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
3088                                 &data_written, &add_status);
3089
3090                 if (status)
3091                         break;
3092
3093                 offset += data_written;
3094                 data_ptr += data_written;
3095                 image_size -= data_written;
3096         }
3097
3098         if (!status) {
3099                 /* Commit the FW written */
3100                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3101                                         0, offset, LANCER_FW_DOWNLOAD_LOCATION,
3102                                         &data_written, &add_status);
3103         }
3104
3105         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3106                                 flash_cmd.dma);
3107         if (status) {
3108                 dev_err(&adapter->pdev->dev,
3109                         "Firmware load error. "
3110                         "Status code: 0x%x Additional Status: 0x%x\n",
3111                         status, add_status);
3112                 goto lancer_fw_exit;
3113         }
3114
3115         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3116 lancer_fw_exit:
3117         return status;
3118 }
3119
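/* Flash a BE2/BE3 UFI image. The UFI generation must match the adapter
 * generation; a Gen3 UFI may carry several image headers, of which only
 * images with id 1 are flashed.
 */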
3120 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3121 {
3122         struct flash_file_hdr_g2 *fhdr;
3123         struct flash_file_hdr_g3 *fhdr3;
3124         struct image_hdr *img_hdr_ptr = NULL;
3125         struct be_dma_mem flash_cmd;
3126         const u8 *p;
3127         int status = 0, i = 0, num_imgs = 0;
3128
3129         p = fw->data;
3130         fhdr = (struct flash_file_hdr_g2 *) p;
3131
3132         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
3133         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3134                                           &flash_cmd.dma, GFP_KERNEL);
3135         if (!flash_cmd.va) {
3136                 status = -ENOMEM;
3137                 dev_err(&adapter->pdev->dev,
3138                         "Memory allocation failure while flashing\n");
3139                 goto be_fw_exit;
3140         }
3141
3142         if ((adapter->generation == BE_GEN3) &&
3143                         (get_ufigen_type(fhdr) == BE_GEN3)) {
3144                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
3145                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3146                 for (i = 0; i < num_imgs; i++) {
3147                         img_hdr_ptr = (struct image_hdr *) (fw->data +
3148                                         (sizeof(struct flash_file_hdr_g3) +
3149                                          i * sizeof(struct image_hdr)));
3150                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
3151                                 status = be_flash_data(adapter, fw, &flash_cmd,
3152                                                         num_imgs);
3153                 }
3154         } else if ((adapter->generation == BE_GEN2) &&
3155                         (get_ufigen_type(fhdr) == BE_GEN2)) {
3156                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
3157         } else {
3158                 dev_err(&adapter->pdev->dev,
3159                         "UFI and interface are not compatible for flashing\n");
3160                 status = -EINVAL;
3161         }
3162
3163         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3164                           flash_cmd.dma);
3165         if (status) {
3166                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3167                 goto be_fw_exit;
3168         }
3169
3170         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3171
3172 be_fw_exit:
3173         return status;
3174 }
3175
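/* Entry point for firmware flashing (reached via ethtool's flashdev op);
 * flashing is refused while the interface is down.
 */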
3176 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3177 {
3178         const struct firmware *fw;
3179         int status;
3180
3181         if (!netif_running(adapter->netdev)) {
3182                 dev_err(&adapter->pdev->dev,
3183                         "Firmware load not allowed (interface is down)\n");
3184                 return -ENETDOWN;
3185         }
3186
3187         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3188         if (status)
3189                 goto fw_exit;
3190
3191         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3192
3193         if (lancer_chip(adapter))
3194                 status = lancer_fw_download(adapter, fw);
3195         else
3196                 status = be_fw_download(adapter, fw);
3197
3198 fw_exit:
3199         release_firmware(fw);
3200         return status;
3201 }
3202
3203 static const struct net_device_ops be_netdev_ops = {
3204         .ndo_open               = be_open,
3205         .ndo_stop               = be_close,
3206         .ndo_start_xmit         = be_xmit,
3207         .ndo_set_rx_mode        = be_set_rx_mode,
3208         .ndo_set_mac_address    = be_mac_addr_set,
3209         .ndo_change_mtu         = be_change_mtu,
3210         .ndo_get_stats64        = be_get_stats64,
3211         .ndo_validate_addr      = eth_validate_addr,
3212         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3213         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3214         .ndo_set_vf_mac         = be_set_vf_mac,
3215         .ndo_set_vf_vlan        = be_set_vf_vlan,
3216         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3217         .ndo_get_vf_config      = be_get_vf_config,
3218 #ifdef CONFIG_NET_POLL_CONTROLLER
3219         .ndo_poll_controller    = be_netpoll,
3220 #endif
3221 };
3222
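/* Set up netdev feature flags, ops and a NAPI context per event queue */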
3223 static void be_netdev_init(struct net_device *netdev)
3224 {
3225         struct be_adapter *adapter = netdev_priv(netdev);
3226         struct be_eq_obj *eqo;
3227         int i;
3228
3229         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3230                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3231                 NETIF_F_HW_VLAN_TX;
3232         if (be_multi_rxq(adapter))
3233                 netdev->hw_features |= NETIF_F_RXHASH;
3234
3235         netdev->features |= netdev->hw_features |
3236                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3237
3238         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3239                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3240
3241         netdev->priv_flags |= IFF_UNICAST_FLT;
3242
3243         netdev->flags |= IFF_MULTICAST;
3244
3245         netif_set_gso_max_size(netdev, 65535);
3246
3247         netdev->netdev_ops = &be_netdev_ops;
3248
3249         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3250
3251         for_all_evt_queues(adapter, eqo, i)
3252                 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3253 }
3254
3255 static void be_unmap_pci_bars(struct be_adapter *adapter)
3256 {
3257         if (adapter->csr)
3258                 iounmap(adapter->csr);
3259         if (adapter->db)
3260                 iounmap(adapter->db);
3261         if (adapter->roce_db.base)
3262                 pci_iounmap(adapter->pdev, adapter->roce_db.base);
3263 }
3264
3265 static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
3266 {
3267         struct pci_dev *pdev = adapter->pdev;
3268         u8 __iomem *addr;
3269
3270         addr = pci_iomap(pdev, 2, 0);
3271         if (addr == NULL)
3272                 return -ENOMEM;
3273
3274         adapter->roce_db.base = addr;
3275         adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3276         adapter->roce_db.size = 8192;
3277         adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3278         return 0;
3279 }
3280
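/* Map the PCI BARs used by the driver: the CSR BAR (PF only), the doorbell
 * BAR, and the RoCE doorbell range on RoCE-capable functions.
 */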
3281 static int be_map_pci_bars(struct be_adapter *adapter)
3282 {
3283         u8 __iomem *addr;
3284         int db_reg;
3285
3286         if (lancer_chip(adapter)) {
3287                 if (be_type_2_3(adapter)) {
3288                         addr = ioremap_nocache(
3289                                         pci_resource_start(adapter->pdev, 0),
3290                                         pci_resource_len(adapter->pdev, 0));
3291                         if (addr == NULL)
3292                                 return -ENOMEM;
3293                         adapter->db = addr;
3294                 }
3295                 if (adapter->if_type == SLI_INTF_TYPE_3) {
3296                         if (lancer_roce_map_pci_bars(adapter))
3297                                 goto pci_map_err;
3298                 }
3299                 return 0;
3300         }
3301
3302         if (be_physfn(adapter)) {
3303                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3304                                 pci_resource_len(adapter->pdev, 2));
3305                 if (addr == NULL)
3306                         return -ENOMEM;
3307                 adapter->csr = addr;
3308         }
3309
3310         if (adapter->generation == BE_GEN2 || be_physfn(adapter))
3311                 db_reg = 4;
3312         else
3313                 db_reg = 0;
3318         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3319                                 pci_resource_len(adapter->pdev, db_reg));
3320         if (addr == NULL)
3321                 goto pci_map_err;
3322         adapter->db = addr;
3323         if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
3324                 adapter->roce_db.size = 4096;
3325                 adapter->roce_db.io_addr =
3326                                 pci_resource_start(adapter->pdev, db_reg);
3327                 adapter->roce_db.total_size =
3328                                 pci_resource_len(adapter->pdev, db_reg);
3329         }
3330         return 0;
3331 pci_map_err:
3332         be_unmap_pci_bars(adapter);
3333         return -ENOMEM;
3334 }
3335
3336 static void be_ctrl_cleanup(struct be_adapter *adapter)
3337 {
3338         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3339
3340         be_unmap_pci_bars(adapter);
3341
3342         if (mem->va)
3343                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3344                                   mem->dma);
3345
3346         mem = &adapter->rx_filter;
3347         if (mem->va)
3348                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3349                                   mem->dma);
3350 }
3351
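/* Map BARs and allocate the DMA memory used for mailbox commands and RX
 * filtering. The mailbox must be 16-byte aligned, so 16 extra bytes are
 * allocated and an aligned pointer into that buffer is used.
 */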
3352 static int be_ctrl_init(struct be_adapter *adapter)
3353 {
3354         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3355         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3356         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3357         int status;
3358
3359         status = be_map_pci_bars(adapter);
3360         if (status)
3361                 goto done;
3362
3363         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3364         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3365                                                 mbox_mem_alloc->size,
3366                                                 &mbox_mem_alloc->dma,
3367                                                 GFP_KERNEL);
3368         if (!mbox_mem_alloc->va) {
3369                 status = -ENOMEM;
3370                 goto unmap_pci_bars;
3371         }
3372         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3373         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3374         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3375         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3376
3377         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3378         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3379                                         &rx_filter->dma, GFP_KERNEL);
3380         if (rx_filter->va == NULL) {
3381                 status = -ENOMEM;
3382                 goto free_mbox;
3383         }
3384         memset(rx_filter->va, 0, rx_filter->size);
3385
3386         mutex_init(&adapter->mbox_lock);
3387         spin_lock_init(&adapter->mcc_lock);
3388         spin_lock_init(&adapter->mcc_cq_lock);
3389
3390         init_completion(&adapter->flash_compl);
3391         pci_save_state(adapter->pdev);
3392         return 0;
3393
3394 free_mbox:
3395         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3396                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3397
3398 unmap_pci_bars:
3399         be_unmap_pci_bars(adapter);
3400
3401 done:
3402         return status;
3403 }
3404
3405 static void be_stats_cleanup(struct be_adapter *adapter)
3406 {
3407         struct be_dma_mem *cmd = &adapter->stats_cmd;
3408
3409         if (cmd->va)
3410                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3411                                   cmd->va, cmd->dma);
3412 }
3413
3414 static int be_stats_init(struct be_adapter *adapter)
3415 {
3416         struct be_dma_mem *cmd = &adapter->stats_cmd;
3417
3418         if (adapter->generation == BE_GEN2) {
3419                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3420         } else {
3421                 if (lancer_chip(adapter))
3422                         cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3423                 else
3424                         cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3425         }
3426         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3427                                      GFP_KERNEL);
3428         if (cmd->va == NULL)
3429                 return -ENOMEM;
3430         memset(cmd->va, 0, cmd->size);
3431         return 0;
3432 }
3433
3434 static void __devexit be_remove(struct pci_dev *pdev)
3435 {
3436         struct be_adapter *adapter = pci_get_drvdata(pdev);
3437
3438         if (!adapter)
3439                 return;
3440
3441         be_roce_dev_remove(adapter);
3442
3443         unregister_netdev(adapter->netdev);
3444
3445         be_clear(adapter);
3446
3447         be_stats_cleanup(adapter);
3448
3449         be_ctrl_cleanup(adapter);
3450
3451         pci_set_drvdata(pdev, NULL);
3452         pci_release_regions(pdev);
3453         pci_disable_device(pdev);
3454
3455         free_netdev(adapter->netdev);
3456 }
3457
3458 bool be_is_wol_supported(struct be_adapter *adapter)
3459 {
3460         return (adapter->wol_cap & BE_WOL_CAP) &&
3461                 !be_is_wol_excluded(adapter);
3462 }
3463
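/* Query the extended FAT capabilities and return the UART debug level
 * configured in firmware module 0.
 */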
3464 u32 be_get_fw_log_level(struct be_adapter *adapter)
3465 {
3466         struct be_dma_mem extfat_cmd;
3467         struct be_fat_conf_params *cfgs;
3468         int status;
3469         u32 level = 0;
3470         int j;
3471
3472         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3473         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3474         extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3475                                              &extfat_cmd.dma);
3476
3477         if (!extfat_cmd.va) {
3478                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3479                         __func__);
3480                 goto err;
3481         }
3482
3483         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3484         if (!status) {
3485                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3486                                                 sizeof(struct be_cmd_resp_hdr));
3487                 for (j = 0; j < cfgs->module[0].num_modes; j++) {
3488                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3489                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3490                 }
3491         }
3492         pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3493                             extfat_cmd.dma);
3494 err:
3495         return level;
3496 }

3497 static int be_get_initial_config(struct be_adapter *adapter)
3498 {
3499         int status;
3500         u32 level;
3501
3502         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3503                         &adapter->function_mode, &adapter->function_caps);
3504         if (status)
3505                 return status;
3506
3507         if (adapter->function_mode & FLEX10_MODE)
3508                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3509         else
3510                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3511
3512         if (be_physfn(adapter))
3513                 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3514         else
3515                 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3516
3517         /* primary mac needs 1 pmac entry */
3518         adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3519                                   sizeof(u32), GFP_KERNEL);
3520         if (!adapter->pmac_id)
3521                 return -ENOMEM;
3522
3523         status = be_cmd_get_cntl_attributes(adapter);
3524         if (status)
3525                 return status;
3526
3527         status = be_cmd_get_acpi_wol_cap(adapter);
3528         if (status) {
3529                 /* in case of a failure to get WoL capabilities,
3530                  * check the exclusion list to determine WoL capability */
3531                 if (!be_is_wol_excluded(adapter))
3532                         adapter->wol_cap |= BE_WOL_CAP;
3533         }
3534
3535         if (be_is_wol_supported(adapter))
3536                 adapter->wol = true;
3537
3538         level = be_get_fw_log_level(adapter);
3539         adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3540
3541         return 0;
3542 }
3543
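/* Derive the adapter generation and SLI family from the PCI device id and
 * the SLI_INTF register, and note whether this function is a VF.
 */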
3544 static int be_dev_type_check(struct be_adapter *adapter)
3545 {
3546         struct pci_dev *pdev = adapter->pdev;
3547         u32 sli_intf = 0;
3548
3549         switch (pdev->device) {
3550         case BE_DEVICE_ID1:
3551         case OC_DEVICE_ID1:
3552                 adapter->generation = BE_GEN2;
3553                 break;
3554         case BE_DEVICE_ID2:
3555         case OC_DEVICE_ID2:
3556                 adapter->generation = BE_GEN3;
3557                 break;
3558         case OC_DEVICE_ID3:
3559         case OC_DEVICE_ID4:
3560                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3561                 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3562                                                 SLI_INTF_IF_TYPE_SHIFT;
3565                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3566                         !be_type_2_3(adapter)) {
3567                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3568                         return -EINVAL;
3569                 }
3570                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3571                                          SLI_INTF_FAMILY_SHIFT);
3572                 adapter->generation = BE_GEN3;
3573                 break;
3574         case OC_DEVICE_ID5:
3575                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3576                 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
3577                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3578                         return -EINVAL;
3579                 }
3580                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3581                                          SLI_INTF_FAMILY_SHIFT);
3582                 adapter->generation = BE_GEN3;
3583                 break;
3584         default:
3585                 adapter->generation = 0;
3586         }
3587
3588         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3589         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3590         return 0;
3591 }
3592
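/* Poll the SLIPORT status register for the ready bit, for up to 30 seconds */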
3593 static int lancer_wait_ready(struct be_adapter *adapter)
3594 {
3595 #define SLIPORT_READY_TIMEOUT 30
3596         u32 sliport_status;
3597         int status = 0, i;
3598
3599         for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3600                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3601                 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3602                         break;
3603
3604                 msleep(1000);
3605         }
3606
3607         if (i == SLIPORT_READY_TIMEOUT)
3608                 status = -1;
3609
3610         return status;
3611 }
3612
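/* If the port reports an error state that needs a reset, trigger a port
 * reset through the SLIPORT control register and wait for the port to come
 * back ready and error-free.
 */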
3613 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3614 {
3615         int status;
3616         u32 sliport_status, err, reset_needed;

3617         status = lancer_wait_ready(adapter);
3618         if (!status) {
3619                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3620                 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3621                 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3622                 if (err && reset_needed) {
3623                         iowrite32(SLI_PORT_CONTROL_IP_MASK,
3624                                         adapter->db + SLIPORT_CONTROL_OFFSET);
3625
3626                         /* check adapter has corrected the error */
3627                         status = lancer_wait_ready(adapter);
3628                         sliport_status = ioread32(adapter->db +
3629                                                         SLIPORT_STATUS_OFFSET);
3630                         sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3631                                                 SLIPORT_STATUS_RN_MASK);
3632                         if (status || sliport_status)
3633                                 status = -1;
3634                 } else if (err || reset_needed) {
3635                         status = -1;
3636                 }
3637         }
3638         return status;
3639 }
3640
3641 static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
3642 {
3643         int status;
3644         u32 sliport_status;
3645
3646         if (adapter->eeh_err || adapter->ue_detected)
3647                 return;
3648
3649         sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3650
3651         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
3652                 dev_err(&adapter->pdev->dev,
3653                         "Adapter in error state. Trying to recover.\n");
3655
3656                 status = lancer_test_and_set_rdy_state(adapter);
3657                 if (status)
3658                         goto err;
3659
3660                 netif_device_detach(adapter->netdev);
3661
3662                 if (netif_running(adapter->netdev))
3663                         be_close(adapter->netdev);
3664
3665                 be_clear(adapter);
3666
3667                 adapter->fw_timeout = false;
3668
3669                 status = be_setup(adapter);
3670                 if (status)
3671                         goto err;
3672
3673                 if (netif_running(adapter->netdev)) {
3674                         status = be_open(adapter->netdev);
3675                         if (status)
3676                                 goto err;
3677                 }
3678
3679                 netif_device_attach(adapter->netdev);
3680
3681                 dev_info(&adapter->pdev->dev,
3682                          "Adapter error recovery succeeded\n");
3683         }
3684         return;
3685 err:
3686         dev_err(&adapter->pdev->dev,
3687                 "Adapter error recovery failed\n");
3688 }
3689
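/* Per-second housekeeping: check for (and on Lancer try to recover from)
 * hardware errors, refresh stats, replenish RX queues that ran dry and
 * update EQ interrupt delays; reschedules itself every second.
 */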
3690 static void be_worker(struct work_struct *work)
3691 {
3692         struct be_adapter *adapter =
3693                 container_of(work, struct be_adapter, work.work);
3694         struct be_rx_obj *rxo;
3695         struct be_eq_obj *eqo;
3696         int i;
3697
3698         if (lancer_chip(adapter))
3699                 lancer_test_and_recover_fn_err(adapter);
3700
3701         be_detect_dump_ue(adapter);
3702
3703         /* when interrupts are not yet enabled, just reap any pending
3704          * mcc completions */
3705         if (!netif_running(adapter->netdev)) {
3706                 be_process_mcc(adapter);
3707                 goto reschedule;
3708         }
3709
3710         if (!adapter->stats_cmd_sent) {
3711                 if (lancer_chip(adapter))
3712                         lancer_cmd_get_pport_stats(adapter,
3713                                                 &adapter->stats_cmd);
3714                 else
3715                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
3716         }
3717
3718         for_all_rx_queues(adapter, rxo, i) {
3719                 if (rxo->rx_post_starved) {
3720                         rxo->rx_post_starved = false;
3721                         be_post_rx_frags(rxo, GFP_KERNEL);
3722                 }
3723         }
3724
3725         for_all_evt_queues(adapter, eqo, i)
3726                 be_eqd_update(adapter, eqo);
3727
3728 reschedule:
3729         adapter->work_counter++;
3730         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3731 }
3732
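/* A function reset is skipped when VFs are already enabled (e.g. when probed
 * by a kdump kernel while VFs are assigned), presumably since the reset
 * would destroy them.
 */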
3733 static bool be_reset_required(struct be_adapter *adapter)
3734 {
3735         return be_find_vfs(adapter, ENABLED) <= 0;
3736 }
3737
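/* PCI probe: enable the device, map BARs, sync with firmware state, reset
 * the function when safe, then set up resources and register the netdev.
 */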
3738 static int __devinit be_probe(struct pci_dev *pdev,
3739                         const struct pci_device_id *pdev_id)
3740 {
3741         int status = 0;
3742         struct be_adapter *adapter;
3743         struct net_device *netdev;
3744
3745         status = pci_enable_device(pdev);
3746         if (status)
3747                 goto do_none;
3748
3749         status = pci_request_regions(pdev, DRV_NAME);
3750         if (status)
3751                 goto disable_dev;
3752         pci_set_master(pdev);
3753
3754         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
3755         if (netdev == NULL) {
3756                 status = -ENOMEM;
3757                 goto rel_reg;
3758         }
3759         adapter = netdev_priv(netdev);
3760         adapter->pdev = pdev;
3761         pci_set_drvdata(pdev, adapter);
3762
3763         status = be_dev_type_check(adapter);
3764         if (status)
3765                 goto free_netdev;
3766
3767         adapter->netdev = netdev;
3768         SET_NETDEV_DEV(netdev, &pdev->dev);
3769
3770         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3771         if (!status) {
3772                 netdev->features |= NETIF_F_HIGHDMA;
3773         } else {
3774                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3775                 if (status) {
3776                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3777                         goto free_netdev;
3778                 }
3779         }
3780
3781         status = be_ctrl_init(adapter);
3782         if (status)
3783                 goto free_netdev;
3784
3785         if (lancer_chip(adapter)) {
3786                 status = lancer_wait_ready(adapter);
3787                 if (!status) {
3788                         iowrite32(SLI_PORT_CONTROL_IP_MASK,
3789                                         adapter->db + SLIPORT_CONTROL_OFFSET);
3790                         status = lancer_test_and_set_rdy_state(adapter);
3791                 }
3792                 if (status) {
3793                         dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
3794                         goto ctrl_clean;
3795                 }
3796         }
3797
3798         /* sync up with fw's ready state */
3799         if (be_physfn(adapter)) {
3800                 status = be_cmd_POST(adapter);
3801                 if (status)
3802                         goto ctrl_clean;
3803         }
3804
3805         /* tell fw we're ready to fire cmds */
3806         status = be_cmd_fw_init(adapter);
3807         if (status)
3808                 goto ctrl_clean;
3809
3810         if (be_reset_required(adapter)) {
3811                 status = be_cmd_reset_function(adapter);
3812                 if (status)
3813                         goto ctrl_clean;
3814         }
3815
3816         /* The INTR bit may be set in the card when probed by a kdump kernel
3817          * after a crash.
3818          */
3819         if (!lancer_chip(adapter))
3820                 be_intr_set(adapter, false);
3821
3822         status = be_stats_init(adapter);
3823         if (status)
3824                 goto ctrl_clean;
3825
3826         status = be_get_initial_config(adapter);
3827         if (status)
3828                 goto stats_clean;
3829
3830         INIT_DELAYED_WORK(&adapter->work, be_worker);
3831         adapter->rx_fc = adapter->tx_fc = true;
3832
3833         status = be_setup(adapter);
3834         if (status)
3835                 goto msix_disable;
3836
3837         be_netdev_init(netdev);
3838         status = register_netdev(netdev);
3839         if (status != 0)
3840                 goto unsetup;
3841
3842         be_roce_dev_add(adapter);
3843
3844         dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
3845                 adapter->port_num);
3846
3847         return 0;
3848
3849 unsetup:
3850         be_clear(adapter);
3851 msix_disable:
3852         be_msix_disable(adapter);
3853 stats_clean:
3854         be_stats_cleanup(adapter);
3855 ctrl_clean:
3856         be_ctrl_cleanup(adapter);
3857 free_netdev:
3858         free_netdev(netdev);
3859         pci_set_drvdata(pdev, NULL);
3860 rel_reg:
3861         pci_release_regions(pdev);
3862 disable_dev:
3863         pci_disable_device(pdev);
3864 do_none:
3865         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3866         return status;
3867 }
3868
3869 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3870 {
3871         struct be_adapter *adapter = pci_get_drvdata(pdev);
3872         struct net_device *netdev =  adapter->netdev;
3873
3874         if (adapter->wol)
3875                 be_setup_wol(adapter, true);
3876
3877         netif_device_detach(netdev);
3878         if (netif_running(netdev)) {
3879                 rtnl_lock();
3880                 be_close(netdev);
3881                 rtnl_unlock();
3882         }
3883         be_clear(adapter);
3884
3885         pci_save_state(pdev);
3886         pci_disable_device(pdev);
3887         pci_set_power_state(pdev, pci_choose_state(pdev, state));
3888         return 0;
3889 }
3890
3891 static int be_resume(struct pci_dev *pdev)
3892 {
3893         int status = 0;
3894         struct be_adapter *adapter = pci_get_drvdata(pdev);
3895         struct net_device *netdev =  adapter->netdev;
3896
3897         netif_device_detach(netdev);
3898
3899         status = pci_enable_device(pdev);
3900         if (status)
3901                 return status;
3902
3903         pci_set_power_state(pdev, PCI_D0);
3904         pci_restore_state(pdev);
3905
3906         /* tell fw we're ready to fire cmds */
3907         status = be_cmd_fw_init(adapter);
3908         if (status)
3909                 return status;
3910
3911         be_setup(adapter);
3912         if (netif_running(netdev)) {
3913                 rtnl_lock();
3914                 be_open(netdev);
3915                 rtnl_unlock();
3916         }
3917         netif_device_attach(netdev);
3918
3919         if (adapter->wol)
3920                 be_setup_wol(adapter, false);
3921
3922         return 0;
3923 }
3924
3925 /*
3926  * An FLR will stop BE from DMAing any data.
3927  */
3928 static void be_shutdown(struct pci_dev *pdev)
3929 {
3930         struct be_adapter *adapter = pci_get_drvdata(pdev);
3931
3932         if (!adapter)
3933                 return;
3934
3935         cancel_delayed_work_sync(&adapter->work);
3936
3937         netif_device_detach(adapter->netdev);
3938
3939         if (adapter->wol)
3940                 be_setup_wol(adapter, true);
3941
3942         be_cmd_reset_function(adapter);
3943
3944         pci_disable_device(pdev);
3945 }
3946
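/* EEH handlers: on error, detach and tear down the adapter and let the PCI
 * core reset the slot; slot_reset re-POSTs the card and resume rebuilds the
 * adapter state.
 */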
3947 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3948                                 pci_channel_state_t state)
3949 {
3950         struct be_adapter *adapter = pci_get_drvdata(pdev);
3951         struct net_device *netdev =  adapter->netdev;
3952
3953         dev_err(&adapter->pdev->dev, "EEH error detected\n");
3954
3955         adapter->eeh_err = true;
3956
3957         netif_device_detach(netdev);
3958
3959         if (netif_running(netdev)) {
3960                 rtnl_lock();
3961                 be_close(netdev);
3962                 rtnl_unlock();
3963         }
3964         be_clear(adapter);
3965
3966         if (state == pci_channel_io_perm_failure)
3967                 return PCI_ERS_RESULT_DISCONNECT;
3968
3969         pci_disable_device(pdev);
3970
3971         /* The error could cause the FW to trigger a flash debug dump.
3972          * Resetting the card while flash dump is in progress
3973          * can cause it not to recover; wait for it to finish
3974          */
3975         ssleep(30);
3976         return PCI_ERS_RESULT_NEED_RESET;
3977 }
3978
3979 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3980 {
3981         struct be_adapter *adapter = pci_get_drvdata(pdev);
3982         int status;
3983
3984         dev_info(&adapter->pdev->dev, "EEH reset\n");
3985         adapter->eeh_err = false;
3986         adapter->ue_detected = false;
3987         adapter->fw_timeout = false;
3988
3989         status = pci_enable_device(pdev);
3990         if (status)
3991                 return PCI_ERS_RESULT_DISCONNECT;
3992
3993         pci_set_master(pdev);
3994         pci_set_power_state(pdev, PCI_D0);
3995         pci_restore_state(pdev);
3996
3997         /* Check if card is ok and fw is ready */
3998         status = be_cmd_POST(adapter);
3999         if (status)
4000                 return PCI_ERS_RESULT_DISCONNECT;
4001
4002         return PCI_ERS_RESULT_RECOVERED;
4003 }
4004
4005 static void be_eeh_resume(struct pci_dev *pdev)
4006 {
4007         int status = 0;
4008         struct be_adapter *adapter = pci_get_drvdata(pdev);
4009         struct net_device *netdev =  adapter->netdev;
4010
4011         dev_info(&adapter->pdev->dev, "EEH resume\n");
4012
4013         pci_save_state(pdev);
4014
4015         /* tell fw we're ready to fire cmds */
4016         status = be_cmd_fw_init(adapter);
4017         if (status)
4018                 goto err;
4019
4020         status = be_setup(adapter);
4021         if (status)
4022                 goto err;
4023
4024         if (netif_running(netdev)) {
4025                 status = be_open(netdev);
4026                 if (status)
4027                         goto err;
4028         }
4029         netif_device_attach(netdev);
4030         return;
4031 err:
4032         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4033 }
4034
4035 static struct pci_error_handlers be_eeh_handlers = {
4036         .error_detected = be_eeh_err_detected,
4037         .slot_reset = be_eeh_reset,
4038         .resume = be_eeh_resume,
4039 };
4040
4041 static struct pci_driver be_driver = {
4042         .name = DRV_NAME,
4043         .id_table = be_dev_ids,
4044         .probe = be_probe,
4045         .remove = be_remove,
4046         .suspend = be_suspend,
4047         .resume = be_resume,
4048         .shutdown = be_shutdown,
4049         .err_handler = &be_eeh_handlers
4050 };
4051
4052 static int __init be_init_module(void)
4053 {
4054         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4055             rx_frag_size != 2048) {
4056                 printk(KERN_WARNING DRV_NAME
4057                         " : Module param rx_frag_size must be 2048/4096/8192."
4058                         " Using 2048\n");
4059                 rx_frag_size = 2048;
4060         }
4061
4062         return pci_register_driver(&be_driver);
4063 }
4064 module_init(be_init_module);
4065
4066 static void __exit be_exit_module(void)
4067 {
4068         pci_unregister_driver(&be_driver);
4069 }
4070 module_exit(be_exit_module);