be2net: refactor HW workarounds in be_xmit()
drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
        return (adapter->function_mode & FLEX10_MODE ||
                adapter->function_mode & VNIC_MODE ||
                adapter->function_mode & UMC_ENABLED);
}

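/* Free the DMA-coherent memory backing a queue, if it was allocated;
 * clearing the va pointer makes the call safe to repeat.
 */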
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;

        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

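/* Allocate zeroed DMA-coherent memory for a queue of 'len' entries of
 * 'entry_size' bytes each, and record the geometry in the queue object.
 */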
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL | __GFP_ZERO);
        if (!mem->va)
                return -ENOMEM;
        return 0;
}

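/* Enable/disable host interrupts by toggling the HOSTINTR bit in the
 * PCI config-space MEMBAR control register; a no-op if the bit is
 * already in the requested state.
 */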
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                                &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                        PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        int status = 0;

        /* On Lancer, interrupts can't be controlled via this register */
        if (lancer_chip(adapter))
                return;

        if (adapter->eeh_error)
                return;

        status = be_cmd_intr_set(adapter, enable);
        if (status)
                be_reg_intr_set(adapter, enable);
}

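/* The doorbell helpers below post producer/consumer updates to the HW
 * rings; the wmb() orders the descriptor writes before the doorbell
 * write.
 */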
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
                          u16 posted)
{
        u32 val = 0;
        val |= txo->q.id & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

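/* ndo_set_mac_address handler: for BE VFs only netdev->dev_addr is
 * updated; otherwise the new MAC filter (pmac) is added before the old
 * one is deleted.
 */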
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;
        u8 current_mac[ETH_ALEN];
        u32 pmac_id = adapter->pmac_id[0];
        bool active_mac = true;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* For BE VF, MAC address is already activated by PF.
         * Hence only operation left is updating netdev->dev_addr.
         * Update it if user is passing the same MAC which was used
         * during configuring VF MAC from PF(Hypervisor).
         */
        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = be_cmd_mac_addr_query(adapter, current_mac,
                                               false, adapter->if_handle, 0);
                if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
                        goto done;
                else
                        goto err;
        }

        if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
                goto done;

        /* For Lancer check if any MAC is active.
         * If active, get its mac id.
         */
        if (lancer_chip(adapter) && !be_physfn(adapter))
                be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
                                         &pmac_id, 0);

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                 adapter->if_handle,
                                 &adapter->pmac_id[0], 0);

        if (status)
                goto err;

        if (active_mac)
                be_cmd_pmac_del(adapter, adapter->if_handle,
                                pmac_id, 0);
done:
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        return 0;
err:
        dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
        return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else {
                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        }
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else {
                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        }
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_filtered =
                                        port_stats->rx_address_filtered +
                                        port_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_filtered =
                                        pport_stats->rx_address_filtered +
                                        pport_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

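/* Accumulate a 16-bit HW counter that wraps at 65535 into a 32-bit SW
 * counter. For example, *acc = 0x0001FFF0 and val = 0x0005 means the
 * HW counter wrapped, so the result is 0x00010005 + 65536 = 0x00020005.
 */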
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   (x & 0xFFFF)
#define hi(x)                   (x & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}

void populate_erx_stats(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        u32 erx_stat)
{
        if (!BEx_chip(adapter))
                rx_stats(rxo)->rx_drops_no_frags = erx_stat;
        else
                /* below erx HW counter can actually wrap around after
                 * 65535. Driver accumulates a 32-bit value
                 */
                accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;
        u32 erx_stat;

        if (lancer_chip(adapter)) {
                populate_lancer_stats(adapter);
        } else {
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
                else
                        /* for BE3 and Skyhawk */
                        populate_be_v1_stats(adapter);

                /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
                for_all_rx_queues(adapter, rxo, i) {
                        erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
                        populate_erx_stats(adapter, rxo, erx_stat);
                }
        }
}

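/* ndo_get_stats64 handler: sums the per-queue SW counters (sampled via
 * u64_stats seqcounts so 64-bit reads are consistent on 32-bit hosts)
 * and folds in the HW error counters parsed into drv_stats.
 */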
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);
                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);
                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if ((link_status & LINK_STATUS_MASK) == LINK_UP)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

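/* Fill one WRB with the DMA address and length of a single fragment */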
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
        wrb->rsvd0 = 0;
}

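/* Return the vlan tag to use for a tx skb; if the priority requested
 * by the stack is not in the available bitmap, substitute the
 * FW-recommended priority.
 */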
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                        struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If vlan priority provided by OS is NOT in available bmap */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                adapter->recommended_prio;

        return vlan_tag;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
        u16 vlan_tag;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        /* To skip HW VLAN tagging: evt = 1, compl = 0 */
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

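/* Map the skb's linear part and frags and fill one WRB per fragment,
 * preceded by the header WRB. Returns the number of bytes mapped, or 0
 * on a DMA mapping error after unwinding the mappings already made.
 */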
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
                bool skip_hw_vlan)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

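/* Insert the vlan tag(s) into the packet itself instead of having the
 * HW tag it; used by the tx workarounds below when HW tagging must be
 * skipped. May set *skip_hw_vlan and may return NULL on allocation
 * failure.
 */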
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb,
                                             bool *skip_hw_vlan)
{
        u16 vlan_tag = 0;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return skb;

        if (vlan_tx_tag_present(skb))
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);

        if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
                if (!vlan_tag)
                        vlan_tag = adapter->pvid;
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        if (vlan_tag) {
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;

                skb->vlan_tci = 0;
        }

        /* Insert the outer VLAN, if any */
        if (adapter->qnq_vid) {
                vlan_tag = adapter->qnq_vid;
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
        struct ethhdr *eh = (struct ethhdr *)skb->data;
        u16 offset = ETH_HLEN;

        if (eh->h_proto == htons(ETH_P_IPV6)) {
                struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

                offset += sizeof(struct ipv6hdr);
                if (ip6h->nexthdr != NEXTHDR_TCP &&
                    ip6h->nexthdr != NEXTHDR_UDP) {
                        struct ipv6_opt_hdr *ehdr =
                                (struct ipv6_opt_hdr *) (skb->data + offset);

                        /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
                        if (ehdr->hdrlen == 0xff)
                                return true;
                }
        }
        return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
                                struct sk_buff *skb)
{
        return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

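/* Apply all HW workarounds needed on the tx path (see the comments on
 * each fixup below); returns the possibly-modified skb, or NULL if the
 * packet had to be dropped.
 */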
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
                                           struct sk_buff *skb,
                                           bool *skip_hw_vlan)
{
        struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
        unsigned int eth_hdr_len;
        struct iphdr *ip;

        /* For padded packets, BE HW modifies tot_len field in IP header
         * incorrectly when VLAN tag is inserted by HW.
         */
        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                                                VLAN_ETH_HLEN : ETH_HLEN;
        if (skb->len <= 60 && vlan_tx_tag_present(skb) &&
            is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }

        /* If vlan tag is already inlined in the packet, skip HW VLAN
         * tagging in UMC mode
         */
        if ((adapter->function_mode & UMC_ENABLED) &&
            veh->h_vlan_proto == htons(ETH_P_8021Q))
                *skip_hw_vlan = true;

        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
         * Manually insert VLAN in pkt.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
            vlan_tx_tag_present(skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        /* HW may lockup when VLAN HW tagging is requested on
         * certain ipv6 packets. Drop such pkts if the HW workaround to
         * skip HW tagging is not enabled by FW.
         */
        if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
            (adapter->pvid || adapter->qnq_vid) &&
            !qnq_async_evt_rcvd(adapter)))
                goto tx_drop;

        /* Manual VLAN tag insertion to prevent an ASIC lockup when the
         * ASIC inserts a VLAN tag into certain ipv6 packets. Insert the
         * VLAN tag in the driver, and set the event, completion and
         * vlan bits accordingly in the Tx WRB.
         */
        if (be_ipv6_tx_stall_chk(adapter, skb) &&
            be_vlan_tag_tx_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        return skb;
tx_drop:
        dev_kfree_skb_any(skb);
        return NULL;
}

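/* ndo_start_xmit handler: apply the HW workarounds, build the WRBs,
 * stop the queue if it cannot hold another max-fragment skb, then ring
 * the tx doorbell.
 */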
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        bool dummy_wrb, stopped = false;
        u32 wrb_cnt = 0, copied = 0;
        bool skip_hw_vlan = false;
        u32 start = txq->head;

        skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
        if (!skb)
                return NETDEV_TX_OK;

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
                              skip_hw_vlan);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txo, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        u16 num = 0, i;
        int status = 0;

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added > adapter->max_vlans)
                goto set_vlan_promisc;

        /* Construct VLAN Table to give to HW */
        for (i = 0; i < VLAN_N_VID; i++)
                if (adapter->vlan_tag[i])
                        vids[num++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    vids, num, 1, 0);

        /* Set to VLAN promisc mode as setting VLAN filter failed */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
                goto set_vlan_promisc;
        }

        return status;

set_vlan_promisc:
        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    NULL, 0, 1, 1);
        return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added++;
        else
                adapter->vlan_tag[vid] = 0;
ret:
        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added--;
        else
                adapter->vlan_tag[vid] = 1;
ret:
        return status;
}

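/* ndo_set_rx_mode handler: program promiscuous/multicast/unicast
 * filters, falling back to (mcast) promiscuous mode when the HW filter
 * tables are exhausted.
 */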
static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > adapter->max_mcast_mac) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;
        bool active_mac = false;
        u32 pmac_id;
        u8 old_mac[ETH_ALEN];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        if (lancer_chip(adapter)) {
                status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
                                                  &pmac_id, vf + 1);
                if (!status && active_mac)
                        be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                        pmac_id, vf + 1);

                status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
        } else {
                status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                         vf_cfg->pmac_id, vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                                mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
        vi->vlan = vf_cfg->vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095)
                return -EINVAL;

        if (vlan) {
                if (adapter->vf_cfg[vf].vlan_tag != vlan) {
                        /* If this is new value, program it. Else skip. */
                        adapter->vf_cfg[vf].vlan_tag = vlan;

                        status = be_cmd_set_hsw_config(adapter, vlan,
                                vf + 1, adapter->vf_cfg[vf].if_handle);
                }
        } else {
                /* Reset Transparent Vlan Tagging. */
                adapter->vf_cfg[vf].vlan_tag = 0;
                vlan = adapter->vf_cfg[vf].def_vid;
                status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                        adapter->vf_cfg[vf].if_handle);
        }

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (rate < 100 || rate > 10000) {
                dev_err(&adapter->pdev->dev,
                        "tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }

        if (lancer_chip(adapter))
                status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
        else
                status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
}

static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
        struct pci_dev *dev, *pdev = adapter->pdev;
        int vfs = 0, assigned_vfs = 0, pos;
        u16 offset, stride;

        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
        if (!pos)
                return 0;
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

        dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
        while (dev) {
                if (dev->is_virtfn && pci_physfn(dev) == pdev) {
                        vfs++;
                        if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
                                assigned_vfs++;
                }
                dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
        }
        return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}

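/* Adaptive interrupt coalescing: recompute the EQ delay from the rx
 * packet rate sampled roughly once a second, clamp it to the EQ's
 * min/max, and push it to FW only when it changes.
 */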
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
        struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
        ulong now = jiffies;
        ulong delta = now - stats->rx_jiffies;
        u64 pkts;
        unsigned int start, eqd;

        if (!eqo->enable_aic) {
                eqd = eqo->eqd;
                goto modify_eqd;
        }

        if (eqo->idx >= adapter->num_rx_qs)
                return;

        stats = rx_stats(&adapter->rx_obj[eqo->idx]);

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update once a second */
        if (delta < HZ)
                return;

        do {
                start = u64_stats_fetch_begin_bh(&stats->sync);
                pkts = stats->rx_pkts;
        } while (u64_stats_fetch_retry_bh(&stats->sync, start));

        stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
        stats->rx_pkts_prev = pkts;
        stats->rx_jiffies = now;
        eqd = (stats->rx_pps / 110000) << 3;
        eqd = min(eqd, eqo->max_eqd);
        eqd = max(eqd, eqo->min_eqd);
        if (eqd < 10)
                eqd = 0;

modify_eqd:
        if (eqd != eqo->cur_eqd) {
                be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
                eqo->cur_eqd = eqd;
        }
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts
         */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

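/* Return the page_info for an rx frag and unmap the backing page when
 * its last user is being consumed.
 */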
1358 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1359                                                 u16 frag_idx)
1360 {
1361         struct be_adapter *adapter = rxo->adapter;
1362         struct be_rx_page_info *rx_page_info;
1363         struct be_queue_info *rxq = &rxo->q;
1364
1365         rx_page_info = &rxo->page_info_tbl[frag_idx];
1366         BUG_ON(!rx_page_info->page);
1367
1368         if (rx_page_info->last_page_user) {
1369                 dma_unmap_page(&adapter->pdev->dev,
1370                                dma_unmap_addr(rx_page_info, bus),
1371                                adapter->big_page_size, DMA_FROM_DEVICE);
1372                 rx_page_info->last_page_user = false;
1373         }
1374
1375         atomic_dec(&rxq->used);
1376         return rx_page_info;
1377 }
1378
1379 /* Throwaway the data in the Rx completion */
1380 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1381                                 struct be_rx_compl_info *rxcp)
1382 {
1383         struct be_queue_info *rxq = &rxo->q;
1384         struct be_rx_page_info *page_info;
1385         u16 i, num_rcvd = rxcp->num_rcvd;
1386
1387         for (i = 0; i < num_rcvd; i++) {
1388                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1389                 put_page(page_info->page);
1390                 memset(page_info, 0, sizeof(*page_info));
1391                 index_inc(&rxcp->rxq_idx, rxq->len);
1392         }
1393 }
1394
1395 /*
1396  * skb_fill_rx_data forms a complete skb for an ether frame
1397  * indicated by rxcp.
1398  */
1399 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1400                              struct be_rx_compl_info *rxcp)
1401 {
1402         struct be_queue_info *rxq = &rxo->q;
1403         struct be_rx_page_info *page_info;
1404         u16 i, j;
1405         u16 hdr_len, curr_frag_len, remaining;
1406         u8 *start;
1407
1408         page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1409         start = page_address(page_info->page) + page_info->page_offset;
1410         prefetch(start);
1411
1412         /* Copy data in the first descriptor of this completion */
1413         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1414
1415         skb->len = curr_frag_len;
1416         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1417                 memcpy(skb->data, start, curr_frag_len);
1418                 /* Complete packet has now been moved to data */
1419                 put_page(page_info->page);
1420                 skb->data_len = 0;
1421                 skb->tail += curr_frag_len;
1422         } else {
1423                 hdr_len = ETH_HLEN;
1424                 memcpy(skb->data, start, hdr_len);
1425                 skb_shinfo(skb)->nr_frags = 1;
1426                 skb_frag_set_page(skb, 0, page_info->page);
1427                 skb_shinfo(skb)->frags[0].page_offset =
1428                                         page_info->page_offset + hdr_len;
1429                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1430                 skb->data_len = curr_frag_len - hdr_len;
1431                 skb->truesize += rx_frag_size;
1432                 skb->tail += hdr_len;
1433         }
1434         page_info->page = NULL;
1435
1436         if (rxcp->pkt_size <= rx_frag_size) {
1437                 BUG_ON(rxcp->num_rcvd != 1);
1438                 return;
1439         }
1440
1441         /* More frags present for this completion */
1442         index_inc(&rxcp->rxq_idx, rxq->len);
1443         remaining = rxcp->pkt_size - curr_frag_len;
1444         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1445                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1446                 curr_frag_len = min(remaining, rx_frag_size);
1447
1448                 /* Coalesce all frags from the same physical page in one slot */
1449                 if (page_info->page_offset == 0) {
1450                         /* Fresh page */
1451                         j++;
1452                         skb_frag_set_page(skb, j, page_info->page);
1453                         skb_shinfo(skb)->frags[j].page_offset =
1454                                                         page_info->page_offset;
1455                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1456                         skb_shinfo(skb)->nr_frags++;
1457                 } else {
1458                         put_page(page_info->page);
1459                 }
1460
1461                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1462                 skb->len += curr_frag_len;
1463                 skb->data_len += curr_frag_len;
1464                 skb->truesize += rx_frag_size;
1465                 remaining -= curr_frag_len;
1466                 index_inc(&rxcp->rxq_idx, rxq->len);
1467                 page_info->page = NULL;
1468         }
1469         BUG_ON(j > MAX_SKB_FRAGS);
1470 }
1471
1472 /* Process the RX completion indicated by rxcp when GRO is disabled */
1473 static void be_rx_compl_process(struct be_rx_obj *rxo,
1474                                 struct be_rx_compl_info *rxcp)
1475 {
1476         struct be_adapter *adapter = rxo->adapter;
1477         struct net_device *netdev = adapter->netdev;
1478         struct sk_buff *skb;
1479
1480         skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1481         if (unlikely(!skb)) {
1482                 rx_stats(rxo)->rx_drops_no_skbs++;
1483                 be_rx_compl_discard(rxo, rxcp);
1484                 return;
1485         }
1486
1487         skb_fill_rx_data(rxo, skb, rxcp);
1488
1489         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1490                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1491         else
1492                 skb_checksum_none_assert(skb);
1493
1494         skb->protocol = eth_type_trans(skb, netdev);
1495         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1496         if (netdev->features & NETIF_F_RXHASH)
1497                 skb->rxhash = rxcp->rss_hash;
1498
1500         if (rxcp->vlanf)
1501                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1502
1503         netif_receive_skb(skb);
1504 }
1505
1506 /* Process the RX completion indicated by rxcp when GRO is enabled */
1507 void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1508                              struct be_rx_compl_info *rxcp)
1509 {
1510         struct be_adapter *adapter = rxo->adapter;
1511         struct be_rx_page_info *page_info;
1512         struct sk_buff *skb = NULL;
1513         struct be_queue_info *rxq = &rxo->q;
1514         u16 remaining, curr_frag_len;
1515         u16 i, j;
1516
1517         skb = napi_get_frags(napi);
1518         if (!skb) {
1519                 be_rx_compl_discard(rxo, rxcp);
1520                 return;
1521         }
1522
1523         remaining = rxcp->pkt_size;
1524         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1525                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1526
1527                 curr_frag_len = min(remaining, rx_frag_size);
1528
1529                 /* Coalesce all frags from the same physical page in one slot */
1530                 if (i == 0 || page_info->page_offset == 0) {
1531                         /* First frag or Fresh page */
1532                         j++;
1533                         skb_frag_set_page(skb, j, page_info->page);
1534                         skb_shinfo(skb)->frags[j].page_offset =
1535                                                         page_info->page_offset;
1536                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1537                 } else {
1538                         put_page(page_info->page);
1539                 }
1540                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1541                 skb->truesize += rx_frag_size;
1542                 remaining -= curr_frag_len;
1543                 index_inc(&rxcp->rxq_idx, rxq->len);
1544                 memset(page_info, 0, sizeof(*page_info));
1545         }
1546         BUG_ON(j > MAX_SKB_FRAGS);
1547
1548         skb_shinfo(skb)->nr_frags = j + 1;
1549         skb->len = rxcp->pkt_size;
1550         skb->data_len = rxcp->pkt_size;
1551         skb->ip_summed = CHECKSUM_UNNECESSARY;
1552         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1553         if (adapter->netdev->features & NETIF_F_RXHASH)
1554                 skb->rxhash = rxcp->rss_hash;
1555
1556         if (rxcp->vlanf)
1557                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1558
1559         napi_gro_frags(napi);
1560 }
1561
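     /* Parse a v1 (BE3-native) RX completion into the chip-agnostic rxcp */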
1562 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1563                                  struct be_rx_compl_info *rxcp)
1564 {
1565         rxcp->pkt_size =
1566                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1567         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1568         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1569         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1570         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1571         rxcp->ip_csum =
1572                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1573         rxcp->l4_csum =
1574                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1575         rxcp->ipv6 =
1576                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1577         rxcp->rxq_idx =
1578                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1579         rxcp->num_rcvd =
1580                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1581         rxcp->pkt_type =
1582                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1583         rxcp->rss_hash =
1584                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1585         if (rxcp->vlanf) {
1586                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1587                                           compl);
1588                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1589                                                compl);
1590         }
1591         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1592 }
1593
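     /* Parse a v0 (legacy) RX completion into the chip-agnostic rxcp */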
1594 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1595                                  struct be_rx_compl_info *rxcp)
1596 {
1597         rxcp->pkt_size =
1598                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1599         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1600         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1601         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1602         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1603         rxcp->ip_csum =
1604                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1605         rxcp->l4_csum =
1606                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1607         rxcp->ipv6 =
1608                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1609         rxcp->rxq_idx =
1610                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1611         rxcp->num_rcvd =
1612                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1613         rxcp->pkt_type =
1614                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1615         rxcp->rss_hash =
1616                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1617         if (rxcp->vlanf) {
1618                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1619                                           compl);
1620                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1621                                                compl);
1622         }
1623         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1624 }
1625
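     /* Return the next valid RX completion, parsed into rxo->rxcp, or NULL
      * when no completion is pending in the CQ.
      */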
1626 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1627 {
1628         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1629         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1630         struct be_adapter *adapter = rxo->adapter;
1631
1632         /* For checking the valid bit it is Ok to use either definition as the
1633          * valid bit is at the same position in both v0 and v1 Rx compl */
1634         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1635                 return NULL;
1636
1637         rmb();
1638         be_dws_le_to_cpu(compl, sizeof(*compl));
1639
1640         if (adapter->be3_native)
1641                 be_parse_rx_compl_v1(compl, rxcp);
1642         else
1643                 be_parse_rx_compl_v0(compl, rxcp);
1644
1645         if (rxcp->vlanf) {
1646                 /* vlanf could be wrongly set in some cards.
1647                  * ignore if vtm is not set */
1648                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1649                         rxcp->vlanf = 0;
1650
1651                 if (!lancer_chip(adapter))
1652                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1653
1654                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1655                     !adapter->vlan_tag[rxcp->vlan_tag])
1656                         rxcp->vlanf = 0;
1657         }
1658
1659         /* As the compl has been parsed, reset it; we won't touch it again */
1660         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1661
1662         queue_tail_inc(&rxo->cq);
1663         return rxcp;
1664 }
1665
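     /* Allocate pages for a buffer of the given size (compound if order > 0) */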
1666 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1667 {
1668         u32 order = get_order(size);
1669
1670         if (order > 0)
1671                 gfp |= __GFP_COMP;
1672         return  alloc_pages(gfp, order);
1673 }
1674
1675 /*
1676  * Allocate a page, split it into fragments of size rx_frag_size and post as
1677  * receive buffers to BE
1678  */
1679 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1680 {
1681         struct be_adapter *adapter = rxo->adapter;
1682         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1683         struct be_queue_info *rxq = &rxo->q;
1684         struct page *pagep = NULL;
1685         struct be_eth_rx_d *rxd;
1686         u64 page_dmaaddr = 0, frag_dmaaddr;
1687         u32 posted, page_offset = 0;
1688
1689         page_info = &rxo->page_info_tbl[rxq->head];
1690         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1691                 if (!pagep) {
1692                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1693                         if (unlikely(!pagep)) {
1694                                 rx_stats(rxo)->rx_post_fail++;
1695                                 break;
1696                         }
1697                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1698                                                     0, adapter->big_page_size,
1699                                                     DMA_FROM_DEVICE);
1700                         page_info->page_offset = 0;
1701                 } else {
1702                         get_page(pagep);
1703                         page_info->page_offset = page_offset + rx_frag_size;
1704                 }
1705                 page_offset = page_info->page_offset;
1706                 page_info->page = pagep;
1707                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1708                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1709
1710                 rxd = queue_head_node(rxq);
1711                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1712                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1713
1714                 /* Any space left in the current big page for another frag? */
1715                 if ((page_offset + rx_frag_size + rx_frag_size) >
1716                                         adapter->big_page_size) {
1717                         pagep = NULL;
1718                         page_info->last_page_user = true;
1719                 }
1720
1721                 prev_page_info = page_info;
1722                 queue_head_inc(rxq);
1723                 page_info = &rxo->page_info_tbl[rxq->head];
1724         }
1725         if (pagep)
1726                 prev_page_info->last_page_user = true;
1727
1728         if (posted) {
1729                 atomic_add(posted, &rxq->used);
1730                 be_rxq_notify(adapter, rxq->id, posted);
1731         } else if (atomic_read(&rxq->used) == 0) {
1732                 /* Let be_worker replenish when memory is available */
1733                 rxo->rx_post_starved = true;
1734         }
1735 }
1736
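     /* Return the next valid TX completion from the CQ, or NULL if none */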
1737 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1738 {
1739         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1740
1741         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1742                 return NULL;
1743
1744         rmb();
1745         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1746
1747         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1748
1749         queue_tail_inc(tx_cq);
1750         return txcp;
1751 }
1752
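     /* Unmap and free the skb at the TX queue tail; returns the number of
      * WRBs (including the header WRB) freed up in the TX queue.
      */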
1753 static u16 be_tx_compl_process(struct be_adapter *adapter,
1754                 struct be_tx_obj *txo, u16 last_index)
1755 {
1756         struct be_queue_info *txq = &txo->q;
1757         struct be_eth_wrb *wrb;
1758         struct sk_buff **sent_skbs = txo->sent_skb_list;
1759         struct sk_buff *sent_skb;
1760         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1761         bool unmap_skb_hdr = true;
1762
1763         sent_skb = sent_skbs[txq->tail];
1764         BUG_ON(!sent_skb);
1765         sent_skbs[txq->tail] = NULL;
1766
1767         /* skip header wrb */
1768         queue_tail_inc(txq);
1769
1770         do {
1771                 cur_index = txq->tail;
1772                 wrb = queue_tail_node(txq);
1773                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1774                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1775                 unmap_skb_hdr = false;
1776
1777                 num_wrbs++;
1778                 queue_tail_inc(txq);
1779         } while (cur_index != last_index);
1780
1781         kfree_skb(sent_skb);
1782         return num_wrbs;
1783 }
1784
1785 /* Return the number of events in the event queue */
1786 static inline int events_get(struct be_eq_obj *eqo)
1787 {
1788         struct be_eq_entry *eqe;
1789         int num = 0;
1790
1791         do {
1792                 eqe = queue_tail_node(&eqo->q);
1793                 if (eqe->evt == 0)
1794                         break;
1795
1796                 rmb();
1797                 eqe->evt = 0;
1798                 num++;
1799                 queue_tail_inc(&eqo->q);
1800         } while (true);
1801
1802         return num;
1803 }
1804
1805 /* Leaves the EQ in a disarmed state */
1806 static void be_eq_clean(struct be_eq_obj *eqo)
1807 {
1808         int num = events_get(eqo);
1809
1810         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1811 }
1812
1813 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1814 {
1815         struct be_rx_page_info *page_info;
1816         struct be_queue_info *rxq = &rxo->q;
1817         struct be_queue_info *rx_cq = &rxo->cq;
1818         struct be_rx_compl_info *rxcp;
1819         struct be_adapter *adapter = rxo->adapter;
1820         int flush_wait = 0;
1821         u16 tail;
1822
1823         /* Consume pending rx completions.
1824          * Wait for the flush completion (identified by zero num_rcvd)
1825          * to arrive. Notify CQ even when there are no more CQ entries
1826          * for HW to flush partially coalesced CQ entries.
1827          * In Lancer, there is no need to wait for flush compl.
1828          */
1829         for (;;) {
1830                 rxcp = be_rx_compl_get(rxo);
1831                 if (rxcp == NULL) {
1832                         if (lancer_chip(adapter))
1833                                 break;
1834
1835                         if (flush_wait++ > 10 || be_hw_error(adapter)) {
1836                                 dev_warn(&adapter->pdev->dev,
1837                                          "did not receive flush compl\n");
1838                                 break;
1839                         }
1840                         be_cq_notify(adapter, rx_cq->id, true, 0);
1841                         mdelay(1);
1842                 } else {
1843                         be_rx_compl_discard(rxo, rxcp);
1844                         be_cq_notify(adapter, rx_cq->id, false, 1);
1845                         if (rxcp->num_rcvd == 0)
1846                                 break;
1847                 }
1848         }
1849
1850         /* After cleanup, leave the CQ in unarmed state */
1851         be_cq_notify(adapter, rx_cq->id, false, 0);
1852
1853         /* Then free posted rx buffers that were not used */
1854         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1855         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1856                 page_info = get_rx_page_info(rxo, tail);
1857                 put_page(page_info->page);
1858                 memset(page_info, 0, sizeof(*page_info));
1859         }
1860         BUG_ON(atomic_read(&rxq->used));
1861         rxq->tail = rxq->head = 0;
1862 }
1863
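     /* Drain TX completions (waiting up to ~200ms for them to arrive) and
      * then free any posted skbs whose completions never arrived.
      */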
1864 static void be_tx_compl_clean(struct be_adapter *adapter)
1865 {
1866         struct be_tx_obj *txo;
1867         struct be_queue_info *txq;
1868         struct be_eth_tx_compl *txcp;
1869         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1870         struct sk_buff *sent_skb;
1871         bool dummy_wrb;
1872         int i, pending_txqs;
1873
1874         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1875         do {
1876                 pending_txqs = adapter->num_tx_qs;
1877
1878                 for_all_tx_queues(adapter, txo, i) {
1879                         txq = &txo->q;
1880                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1881                                 end_idx =
1882                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1883                                                       wrb_index, txcp);
1884                                 num_wrbs += be_tx_compl_process(adapter, txo,
1885                                                                 end_idx);
1886                                 cmpl++;
1887                         }
1888                         if (cmpl) {
1889                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1890                                 atomic_sub(num_wrbs, &txq->used);
1891                                 cmpl = 0;
1892                                 num_wrbs = 0;
1893                         }
1894                         if (atomic_read(&txq->used) == 0)
1895                                 pending_txqs--;
1896                 }
1897
1898                 if (pending_txqs == 0 || ++timeo > 200)
1899                         break;
1900
1901                 mdelay(1);
1902         } while (true);
1903
1904         for_all_tx_queues(adapter, txo, i) {
1905                 txq = &txo->q;
1906                 if (atomic_read(&txq->used))
1907                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1908                                 atomic_read(&txq->used));
1909
1910                 /* free posted tx for which compls will never arrive */
1911                 while (atomic_read(&txq->used)) {
1912                         sent_skb = txo->sent_skb_list[txq->tail];
1913                         end_idx = txq->tail;
1914                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1915                                                    &dummy_wrb);
1916                         index_adv(&end_idx, num_wrbs - 1, txq->len);
1917                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1918                         atomic_sub(num_wrbs, &txq->used);
1919                 }
1920         }
1921 }
1922
1923 static void be_evt_queues_destroy(struct be_adapter *adapter)
1924 {
1925         struct be_eq_obj *eqo;
1926         int i;
1927
1928         for_all_evt_queues(adapter, eqo, i) {
1929                 if (eqo->q.created) {
1930                         be_eq_clean(eqo);
1931                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1932                 }
1933                 be_queue_free(adapter, &eqo->q);
1934         }
1935 }
1936
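     /* Allocate and create one event queue per interrupt vector */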
1937 static int be_evt_queues_create(struct be_adapter *adapter)
1938 {
1939         struct be_queue_info *eq;
1940         struct be_eq_obj *eqo;
1941         int i, rc;
1942
1943         adapter->num_evt_qs = num_irqs(adapter);
1944
1945         for_all_evt_queues(adapter, eqo, i) {
1946                 eqo->adapter = adapter;
1947                 eqo->tx_budget = BE_TX_BUDGET;
1948                 eqo->idx = i;
1949                 eqo->max_eqd = BE_MAX_EQD;
1950                 eqo->enable_aic = true;
1951
1952                 eq = &eqo->q;
1953                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1954                                         sizeof(struct be_eq_entry));
1955                 if (rc)
1956                         return rc;
1957
1958                 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1959                 if (rc)
1960                         return rc;
1961         }
1962         return 0;
1963 }
1964
1965 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1966 {
1967         struct be_queue_info *q;
1968
1969         q = &adapter->mcc_obj.q;
1970         if (q->created)
1971                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1972         be_queue_free(adapter, q);
1973
1974         q = &adapter->mcc_obj.cq;
1975         if (q->created)
1976                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1977         be_queue_free(adapter, q);
1978 }
1979
1980 /* Must be called only after TX qs are created as MCC shares TX EQ */
1981 static int be_mcc_queues_create(struct be_adapter *adapter)
1982 {
1983         struct be_queue_info *q, *cq;
1984
1985         cq = &adapter->mcc_obj.cq;
1986         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1987                         sizeof(struct be_mcc_compl)))
1988                 goto err;
1989
1990         /* Use the default EQ for MCC completions */
1991         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1992                 goto mcc_cq_free;
1993
1994         q = &adapter->mcc_obj.q;
1995         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1996                 goto mcc_cq_destroy;
1997
1998         if (be_cmd_mccq_create(adapter, q, cq))
1999                 goto mcc_q_free;
2000
2001         return 0;
2002
2003 mcc_q_free:
2004         be_queue_free(adapter, q);
2005 mcc_cq_destroy:
2006         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
2007 mcc_cq_free:
2008         be_queue_free(adapter, cq);
2009 err:
2010         return -1;
2011 }
2012
2013 static void be_tx_queues_destroy(struct be_adapter *adapter)
2014 {
2015         struct be_queue_info *q;
2016         struct be_tx_obj *txo;
2017         u8 i;
2018
2019         for_all_tx_queues(adapter, txo, i) {
2020                 q = &txo->q;
2021                 if (q->created)
2022                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2023                 be_queue_free(adapter, q);
2024
2025                 q = &txo->cq;
2026                 if (q->created)
2027                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2028                 be_queue_free(adapter, q);
2029         }
2030 }
2031
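     /* A single TXQ is used on BE2, in multi-channel mode and for non-Lancer
      * VF/SR-IOV configs; otherwise all the TX queues the FW allows are used.
      */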
2032 static int be_num_txqs_want(struct be_adapter *adapter)
2033 {
2034         if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
2035             be_is_mc(adapter) ||
2036             (!lancer_chip(adapter) && !be_physfn(adapter)) ||
2037             BE2_chip(adapter))
2038                 return 1;
2039         else
2040                 return adapter->max_tx_queues;
2041 }
2042
2043 static int be_tx_cqs_create(struct be_adapter *adapter)
2044 {
2045         struct be_queue_info *cq, *eq;
2046         int status;
2047         struct be_tx_obj *txo;
2048         u8 i;
2049
2050         adapter->num_tx_qs = be_num_txqs_want(adapter);
2051         if (adapter->num_tx_qs != MAX_TX_QS) {
2052                 rtnl_lock();
2053                 netif_set_real_num_tx_queues(adapter->netdev,
2054                         adapter->num_tx_qs);
2055                 rtnl_unlock();
2056         }
2057
2058         for_all_tx_queues(adapter, txo, i) {
2059                 cq = &txo->cq;
2060                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2061                                         sizeof(struct be_eth_tx_compl));
2062                 if (status)
2063                         return status;
2064
2065                 /* If num_evt_qs is less than num_tx_qs, then more than
2066                  * one txq shares an eq
2067                  */
2068                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2069                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2070                 if (status)
2071                         return status;
2072         }
2073         return 0;
2074 }
2075
2076 static int be_tx_qs_create(struct be_adapter *adapter)
2077 {
2078         struct be_tx_obj *txo;
2079         int i, status;
2080
2081         for_all_tx_queues(adapter, txo, i) {
2082                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2083                                         sizeof(struct be_eth_wrb));
2084                 if (status)
2085                         return status;
2086
2087                 status = be_cmd_txq_create(adapter, txo);
2088                 if (status)
2089                         return status;
2090         }
2091
2092         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2093                  adapter->num_tx_qs);
2094         return 0;
2095 }
2096
2097 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2098 {
2099         struct be_queue_info *q;
2100         struct be_rx_obj *rxo;
2101         int i;
2102
2103         for_all_rx_queues(adapter, rxo, i) {
2104                 q = &rxo->cq;
2105                 if (q->created)
2106                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2107                 be_queue_free(adapter, q);
2108         }
2109 }
2110
2111 static int be_rx_cqs_create(struct be_adapter *adapter)
2112 {
2113         struct be_queue_info *eq, *cq;
2114         struct be_rx_obj *rxo;
2115         int rc, i;
2116
2117         /* We'll create as many RSS rings as there are irqs.
2118          * But when there's only one irq there's no use creating RSS rings
2119          */
2120         adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
2121                                 num_irqs(adapter) + 1 : 1;
2122         if (adapter->num_rx_qs != MAX_RX_QS) {
2123                 rtnl_lock();
2124                 netif_set_real_num_rx_queues(adapter->netdev,
2125                                              adapter->num_rx_qs);
2126                 rtnl_unlock();
2127         }
2128
2129         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2130         for_all_rx_queues(adapter, rxo, i) {
2131                 rxo->adapter = adapter;
2132                 cq = &rxo->cq;
2133                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2134                                 sizeof(struct be_eth_rx_compl));
2135                 if (rc)
2136                         return rc;
2137
2138                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2139                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2140                 if (rc)
2141                         return rc;
2142         }
2143
2144         dev_info(&adapter->pdev->dev,
2145                  "created %d RSS queue(s) and 1 default RX queue\n",
2146                  adapter->num_rx_qs - 1);
2147         return 0;
2148 }
2149
2150 static irqreturn_t be_intx(int irq, void *dev)
2151 {
2152         struct be_eq_obj *eqo = dev;
2153         struct be_adapter *adapter = eqo->adapter;
2154         int num_evts = 0;
2155
2156         /* IRQ is not expected when NAPI is scheduled as the EQ
2157          * will not be armed.
2158          * But, this can happen on Lancer INTx where it takes
2159          * a while to de-assert INTx or in BE2 where occasionally
2160          * an interrupt may be raised even when EQ is unarmed.
2161          * If NAPI is already scheduled, then counting & notifying
2162          * events will orphan them.
2163          */
2164         if (napi_schedule_prep(&eqo->napi)) {
2165                 num_evts = events_get(eqo);
2166                 __napi_schedule(&eqo->napi);
2167                 if (num_evts)
2168                         eqo->spurious_intr = 0;
2169         }
2170         be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2171
2172         /* Return IRQ_HANDLED only for the first spurious intr
2173          * after a valid intr to stop the kernel from branding
2174          * this irq as a bad one!
2175          */
2176         if (num_evts || eqo->spurious_intr++ == 0)
2177                 return IRQ_HANDLED;
2178         else
2179                 return IRQ_NONE;
2180 }
2181
2182 static irqreturn_t be_msix(int irq, void *dev)
2183 {
2184         struct be_eq_obj *eqo = dev;
2185
2186         be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2187         napi_schedule(&eqo->napi);
2188         return IRQ_HANDLED;
2189 }
2190
2191 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2192 {
2193         return (rxcp->tcpf && !rxcp->err) ? true : false;
2194 }
2195
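     /* NAPI RX handler: consume up to budget RX completions, re-arm the CQ
      * and replenish the RX queue when it runs low.
      */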
2196 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2197                         int budget)
2198 {
2199         struct be_adapter *adapter = rxo->adapter;
2200         struct be_queue_info *rx_cq = &rxo->cq;
2201         struct be_rx_compl_info *rxcp;
2202         u32 work_done;
2203
2204         for (work_done = 0; work_done < budget; work_done++) {
2205                 rxcp = be_rx_compl_get(rxo);
2206                 if (!rxcp)
2207                         break;
2208
2209                 /* Is it a flush compl that has no data? */
2210                 if (unlikely(rxcp->num_rcvd == 0))
2211                         goto loop_continue;
2212
2213                 /* Discard compls with partial DMA (Lancer B0) */
2214                 if (unlikely(!rxcp->pkt_size)) {
2215                         be_rx_compl_discard(rxo, rxcp);
2216                         goto loop_continue;
2217                 }
2218
2219                 /* On BE drop pkts that arrive due to imperfect filtering in
2220                  * promiscuous mode on some SKUs
2221                  */
2222                 if (unlikely(rxcp->port != adapter->port_num &&
2223                                 !lancer_chip(adapter))) {
2224                         be_rx_compl_discard(rxo, rxcp);
2225                         goto loop_continue;
2226                 }
2227
2228                 if (do_gro(rxcp))
2229                         be_rx_compl_process_gro(rxo, napi, rxcp);
2230                 else
2231                         be_rx_compl_process(rxo, rxcp);
2232 loop_continue:
2233                 be_rx_stats_update(rxo, rxcp);
2234         }
2235
2236         if (work_done) {
2237                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2238
2239                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2240                         be_post_rx_frags(rxo, GFP_ATOMIC);
2241         }
2242
2243         return work_done;
2244 }
2245
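     /* Reap up to budget TX completions, free the corresponding WRBs and wake
      * the netdev TX queue if needed; returns true when all work was done.
      */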
2246 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2247                           int budget, int idx)
2248 {
2249         struct be_eth_tx_compl *txcp;
2250         int num_wrbs = 0, work_done;
2251
2252         for (work_done = 0; work_done < budget; work_done++) {
2253                 txcp = be_tx_compl_get(&txo->cq);
2254                 if (!txcp)
2255                         break;
2256                 num_wrbs += be_tx_compl_process(adapter, txo,
2257                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2258                                         wrb_index, txcp));
2259         }
2260
2261         if (work_done) {
2262                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2263                 atomic_sub(num_wrbs, &txo->q.used);
2264
2265                 /* As Tx wrbs have been freed up, wake up netdev queue
2266                  * if it was stopped due to lack of tx wrbs.  */
2267                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2268                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2269                         netif_wake_subqueue(adapter->netdev, idx);
2270                 }
2271
2272                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2273                 tx_stats(txo)->tx_compl += work_done;
2274                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2275         }
2276         return (work_done < budget); /* Done */
2277 }
2278
2279 int be_poll(struct napi_struct *napi, int budget)
2280 {
2281         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2282         struct be_adapter *adapter = eqo->adapter;
2283         int max_work = 0, work, i, num_evts;
2284         bool tx_done;
2285
2286         num_evts = events_get(eqo);
2287
2288         /* Process all TXQs serviced by this EQ */
2289         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2290                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2291                                         eqo->tx_budget, i);
2292                 if (!tx_done)
2293                         max_work = budget;
2294         }
2295
2296         /* This loop will iterate twice for EQ0 in which
2297          * completions of the last RXQ (default one) are also processed.
2298          * For other EQs the loop iterates only once.
2299          */
2300         for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2301                 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2302                 max_work = max(work, max_work);
2303         }
2304
2305         if (is_mcc_eqo(eqo))
2306                 be_process_mcc(adapter);
2307
2308         if (max_work < budget) {
2309                 napi_complete(napi);
2310                 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2311         } else {
2312                 /* As we'll continue in polling mode, count and clear events */
2313                 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2314         }
2315         return max_work;
2316 }
2317
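     /* Check for unrecoverable errors: the SLIPORT registers on Lancer and
      * the UE (unrecoverable error) status registers on BE chips.
      */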
2318 void be_detect_error(struct be_adapter *adapter)
2319 {
2320         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2321         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2322         u32 i;
2323
2324         if (be_hw_error(adapter))
2325                 return;
2326
2327         if (lancer_chip(adapter)) {
2328                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2329                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2330                         sliport_err1 = ioread32(adapter->db +
2331                                         SLIPORT_ERROR1_OFFSET);
2332                         sliport_err2 = ioread32(adapter->db +
2333                                         SLIPORT_ERROR2_OFFSET);
2334                 }
2335         } else {
2336                 pci_read_config_dword(adapter->pdev,
2337                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2338                 pci_read_config_dword(adapter->pdev,
2339                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2340                 pci_read_config_dword(adapter->pdev,
2341                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2342                 pci_read_config_dword(adapter->pdev,
2343                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2344
2345                 ue_lo = (ue_lo & ~ue_lo_mask);
2346                 ue_hi = (ue_hi & ~ue_hi_mask);
2347         }
2348
2349         /* On certain platforms BE hardware can indicate spurious UEs.
2350          * In case of a real UE the h/w stops working completely on its
2351          * own, so hw_error is deliberately not set for UE detection.
2352          */
2353         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2354                 adapter->hw_error = true;
2355                 dev_err(&adapter->pdev->dev,
2356                         "Error detected in the card\n");
2357                 dev_err(&adapter->pdev->dev,
2358                         "ERR: sliport status 0x%x\n", sliport_status);
2359                 dev_err(&adapter->pdev->dev,
2360                         "ERR: sliport error1 0x%x\n", sliport_err1);
2361                 dev_err(&adapter->pdev->dev,
2362                         "ERR: sliport error2 0x%x\n", sliport_err2);
2363         }
2367
2368         if (ue_lo) {
2369                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2370                         if (ue_lo & 1)
2371                                 dev_err(&adapter->pdev->dev,
2372                                         "UE: %s bit set\n", ue_status_low_desc[i]);
2373                 }
2374         }
2375
2376         if (ue_hi) {
2377                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2378                         if (ue_hi & 1)
2379                                 dev_err(&adapter->pdev->dev,
2380                                         "UE: %s bit set\n", ue_status_hi_desc[i]);
2381                 }
2382         }
2384 }
2385
2386 static void be_msix_disable(struct be_adapter *adapter)
2387 {
2388         if (msix_enabled(adapter)) {
2389                 pci_disable_msix(adapter->pdev);
2390                 adapter->num_msix_vec = 0;
2391         }
2392 }
2393
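     /* RSS is used only when the function has RSS capability and, on BE
      * chips, only on a PF that does not want SR-IOV.
      */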
2394 static uint be_num_rss_want(struct be_adapter *adapter)
2395 {
2396         u32 num = 0;
2397
2398         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2399             (lancer_chip(adapter) ||
2400              (!sriov_want(adapter) && be_physfn(adapter)))) {
2401                 num = adapter->max_rss_queues;
2402                 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2403         }
2404         return num;
2405 }
2406
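     /* Request one MSI-x vector per desired RSS ring (plus RoCE vectors when
      * supported); retry with fewer vectors if the full request fails.
      */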
2407 static int be_msix_enable(struct be_adapter *adapter)
2408 {
2409 #define BE_MIN_MSIX_VECTORS             1
2410         int i, status, num_vec, num_roce_vec = 0;
2411         struct device *dev = &adapter->pdev->dev;
2412
2413         /* If RSS queues are not used, need a vec for default RX Q */
2414         num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2415         if (be_roce_supported(adapter)) {
2416                 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2417                                         (num_online_cpus() + 1));
2418                 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2419                 num_vec += num_roce_vec;
2420                 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2421         }
2422         num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2423
2424         for (i = 0; i < num_vec; i++)
2425                 adapter->msix_entries[i].entry = i;
2426
2427         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2428         if (status == 0) {
2429                 goto done;
2430         } else if (status >= BE_MIN_MSIX_VECTORS) {
2431                 num_vec = status;
2432                 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2433                                          num_vec);
2434                 if (!status)
2435                         goto done;
2436         }
2437
2438         dev_warn(dev, "MSIx enable failed\n");
2439         /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2440         if (!be_physfn(adapter))
2441                 return status;
2442         return 0;
2443 done:
2444         if (be_roce_supported(adapter)) {
2445                 if (num_vec > num_roce_vec) {
2446                         adapter->num_msix_vec = num_vec - num_roce_vec;
2447                         adapter->num_msix_roce_vec =
2448                                 num_vec - adapter->num_msix_vec;
2449                 } else {
2450                         adapter->num_msix_vec = num_vec;
2451                         adapter->num_msix_roce_vec = 0;
2452                 }
2453         } else
2454                 adapter->num_msix_vec = num_vec;
2455         dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
2456         return 0;
2457 }
2458
2459 static inline int be_msix_vec_get(struct be_adapter *adapter,
2460                                 struct be_eq_obj *eqo)
2461 {
2462         return adapter->msix_entries[eqo->idx].vector;
2463 }
2464
2465 static int be_msix_register(struct be_adapter *adapter)
2466 {
2467         struct net_device *netdev = adapter->netdev;
2468         struct be_eq_obj *eqo;
2469         int status, i, vec;
2470
2471         for_all_evt_queues(adapter, eqo, i) {
2472                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2473                 vec = be_msix_vec_get(adapter, eqo);
2474                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2475                 if (status)
2476                         goto err_msix;
2477         }
2478
2479         return 0;
2480 err_msix:
2481         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2482                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2483         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2484                 status);
2485         be_msix_disable(adapter);
2486         return status;
2487 }
2488
2489 static int be_irq_register(struct be_adapter *adapter)
2490 {
2491         struct net_device *netdev = adapter->netdev;
2492         int status;
2493
2494         if (msix_enabled(adapter)) {
2495                 status = be_msix_register(adapter);
2496                 if (status == 0)
2497                         goto done;
2498                 /* INTx is not supported for VF */
2499                 if (!be_physfn(adapter))
2500                         return status;
2501         }
2502
2503         /* INTx: only the first EQ is used */
2504         netdev->irq = adapter->pdev->irq;
2505         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2506                              &adapter->eq_obj[0]);
2507         if (status) {
2508                 dev_err(&adapter->pdev->dev,
2509                         "INTx request IRQ failed - err %d\n", status);
2510                 return status;
2511         }
2512 done:
2513         adapter->isr_registered = true;
2514         return 0;
2515 }
2516
2517 static void be_irq_unregister(struct be_adapter *adapter)
2518 {
2519         struct net_device *netdev = adapter->netdev;
2520         struct be_eq_obj *eqo;
2521         int i;
2522
2523         if (!adapter->isr_registered)
2524                 return;
2525
2526         /* INTx */
2527         if (!msix_enabled(adapter)) {
2528                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2529                 goto done;
2530         }
2531
2532         /* MSIx */
2533         for_all_evt_queues(adapter, eqo, i)
2534                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2535
2536 done:
2537         adapter->isr_registered = false;
2538 }
2539
2540 static void be_rx_qs_destroy(struct be_adapter *adapter)
2541 {
2542         struct be_queue_info *q;
2543         struct be_rx_obj *rxo;
2544         int i;
2545
2546         for_all_rx_queues(adapter, rxo, i) {
2547                 q = &rxo->q;
2548                 if (q->created) {
2549                         be_cmd_rxq_destroy(adapter, q);
2550                         be_rx_cq_clean(rxo);
2551                 }
2552                 be_queue_free(adapter, q);
2553         }
2554 }
2555
2556 static int be_close(struct net_device *netdev)
2557 {
2558         struct be_adapter *adapter = netdev_priv(netdev);
2559         struct be_eq_obj *eqo;
2560         int i;
2561
2562         be_roce_dev_close(adapter);
2563
2564         if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2565                 for_all_evt_queues(adapter, eqo, i)
2566                         napi_disable(&eqo->napi);
2567                 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2568         }
2569
2570         be_async_mcc_disable(adapter);
2571
2572         /* Wait for all pending tx completions to arrive so that
2573          * all tx skbs are freed.
2574          */
2575         be_tx_compl_clean(adapter);
2576         netif_tx_disable(netdev);
2577
2578         be_rx_qs_destroy(adapter);
2579
2580         for_all_evt_queues(adapter, eqo, i) {
2581                 if (msix_enabled(adapter))
2582                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2583                 else
2584                         synchronize_irq(netdev->irq);
2585                 be_eq_clean(eqo);
2586         }
2587
2588         be_irq_unregister(adapter);
2589
2590         return 0;
2591 }
2592
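     /* Create the default RXQ first (as the FW prefers), then the RSS RXQs,
      * program the RSS indirection table and post the initial RX buffers.
      */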
2593 static int be_rx_qs_create(struct be_adapter *adapter)
2594 {
2595         struct be_rx_obj *rxo;
2596         int rc, i, j;
2597         u8 rsstable[128];
2598
2599         for_all_rx_queues(adapter, rxo, i) {
2600                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2601                                     sizeof(struct be_eth_rx_d));
2602                 if (rc)
2603                         return rc;
2604         }
2605
2606         /* The FW would like the default RXQ to be created first */
2607         rxo = default_rxo(adapter);
2608         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2609                                adapter->if_handle, false, &rxo->rss_id);
2610         if (rc)
2611                 return rc;
2612
2613         for_all_rss_queues(adapter, rxo, i) {
2614                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2615                                        rx_frag_size, adapter->if_handle,
2616                                        true, &rxo->rss_id);
2617                 if (rc)
2618                         return rc;
2619         }
2620
2621         if (be_multi_rxq(adapter)) {
2622                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2623                         for_all_rss_queues(adapter, rxo, i) {
2624                                 if ((j + i) >= 128)
2625                                         break;
2626                                 rsstable[j + i] = rxo->rss_id;
2627                         }
2628                 }
2629                 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2630                                         RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2631
2632                 if (!BEx_chip(adapter))
2633                         adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2634                                                 RSS_ENABLE_UDP_IPV6;
2635
2636                 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2637                                        128);
2638                 if (rc) {
2639                         adapter->rss_flags = 0;
2640                         return rc;
2641                 }
2642         }
2643
2644         /* First time posting */
2645         for_all_rx_queues(adapter, rxo, i)
2646                 be_post_rx_frags(rxo, GFP_KERNEL);
2647         return 0;
2648 }
2649
2650 static int be_open(struct net_device *netdev)
2651 {
2652         struct be_adapter *adapter = netdev_priv(netdev);
2653         struct be_eq_obj *eqo;
2654         struct be_rx_obj *rxo;
2655         struct be_tx_obj *txo;
2656         u8 link_status;
2657         int status, i;
2658
2659         status = be_rx_qs_create(adapter);
2660         if (status)
2661                 goto err;
2662
2663         status = be_irq_register(adapter);
2664         if (status)
2665                 goto err;
2666
2667         for_all_rx_queues(adapter, rxo, i)
2668                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2669
2670         for_all_tx_queues(adapter, txo, i)
2671                 be_cq_notify(adapter, txo->cq.id, true, 0);
2672
2673         be_async_mcc_enable(adapter);
2674
2675         for_all_evt_queues(adapter, eqo, i) {
2676                 napi_enable(&eqo->napi);
2677                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2678         }
2679         adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2680
2681         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2682         if (!status)
2683                 be_link_status_update(adapter, link_status);
2684
2685         netif_tx_start_all_queues(netdev);
2686         be_roce_dev_open(adapter);
2687         return 0;
2688 err:
2689         be_close(adapter->netdev);
2690         return -EIO;
2691 }
2692
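     /* Configure magic-packet Wake-on-LAN in the FW and set the PCI wake
      * state accordingly.
      */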
2693 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2694 {
2695         struct be_dma_mem cmd;
2696         int status = 0;
2697         u8 mac[ETH_ALEN];
2698
2699         memset(mac, 0, ETH_ALEN);
2700
2701         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2702         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2703                                     GFP_KERNEL | __GFP_ZERO);
2704         if (cmd.va == NULL)
2705                 return -1;
2706
2707         if (enable) {
2708                 status = pci_write_config_dword(adapter->pdev,
2709                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2710                 if (status) {
2711                         dev_err(&adapter->pdev->dev,
2712                                 "Could not enable Wake-on-lan\n");
2713                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2714                                           cmd.dma);
2715                         return status;
2716                 }
2717                 status = be_cmd_enable_magic_wol(adapter,
2718                                 adapter->netdev->dev_addr, &cmd);
2719                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2720                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2721         } else {
2722                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2723                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2724                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2725         }
2726
2727         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2728         return status;
2729 }
2730
2731 /*
2732  * Generate a seed MAC address from the PF MAC Address using jhash.
2733  * MAC addresses for VFs are assigned incrementally starting from the seed.
2734  * These addresses are programmed in the ASIC by the PF and the VF driver
2735  * queries for the MAC address during its probe.
2736  */
2737 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2738 {
2739         u32 vf;
2740         int status = 0;
2741         u8 mac[ETH_ALEN];
2742         struct be_vf_cfg *vf_cfg;
2743
2744         be_vf_eth_addr_generate(adapter, mac);
2745
2746         for_all_vfs(adapter, vf_cfg, vf) {
2747                 if (lancer_chip(adapter)) {
2748                         status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
2749                 } else {
2750                         status = be_cmd_pmac_add(adapter, mac,
2751                                                  vf_cfg->if_handle,
2752                                                  &vf_cfg->pmac_id, vf + 1);
2753                 }
2754
2755                 if (status)
2756                         dev_err(&adapter->pdev->dev,
2757                         "Mac address assignment failed for VF %d\n", vf);
2758                 else
2759                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2760
2761                 mac[5] += 1;
2762         }
2763         return status;
2764 }
2765
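     /* Query the MAC address currently programmed for each VF and cache it
      * in its vf_cfg.
      */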
2766 static int be_vfs_mac_query(struct be_adapter *adapter)
2767 {
2768         int status, vf;
2769         u8 mac[ETH_ALEN];
2770         struct be_vf_cfg *vf_cfg;
2771         bool active;
2772
2773         for_all_vfs(adapter, vf_cfg, vf) {
2774                 be_cmd_get_mac_from_list(adapter, mac, &active,
2775                                          &vf_cfg->pmac_id, 0);
2776
2777                 status = be_cmd_mac_addr_query(adapter, mac, false,
2778                                                vf_cfg->if_handle, 0);
2779                 if (status)
2780                         return status;
2781                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2782         }
2783         return 0;
2784 }
2785
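     /* Undo VF setup: destroy the per-VF interfaces/MACs and disable SR-IOV,
      * unless the VFs are still assigned to VMs.
      */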
2786 static void be_vf_clear(struct be_adapter *adapter)
2787 {
2788         struct be_vf_cfg *vf_cfg;
2789         u32 vf;
2790
2791         if (be_find_vfs(adapter, ASSIGNED)) {
2792                 dev_warn(&adapter->pdev->dev,
2793                          "VFs are assigned to VMs: not disabling VFs\n");
2794                 goto done;
2795         }
2796
2797         pci_disable_sriov(adapter->pdev);
2798
2799         for_all_vfs(adapter, vf_cfg, vf) {
2800                 if (lancer_chip(adapter))
2801                         be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2802                 else
2803                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2804                                         vf_cfg->pmac_id, vf + 1);
2805
2806                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2807         }
2808 done:
2809         kfree(adapter->vf_cfg);
2810         adapter->num_vfs = 0;
2811 }
2812
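     /* Undo be_setup(): tear down VFs, MAC entries, the interface, all
      * queues and MSI-x state.
      */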
2813 static int be_clear(struct be_adapter *adapter)
2814 {
2815         int i = 1;
2816
2817         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2818                 cancel_delayed_work_sync(&adapter->work);
2819                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2820         }
2821
2822         if (sriov_enabled(adapter))
2823                 be_vf_clear(adapter);
2824
2825         for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2826                 be_cmd_pmac_del(adapter, adapter->if_handle,
2827                         adapter->pmac_id[i], 0);
2828
2829         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2830
2831         be_mcc_queues_destroy(adapter);
2832         be_rx_cqs_destroy(adapter);
2833         be_tx_queues_destroy(adapter);
2834         be_evt_queues_destroy(adapter);
2835
2836         kfree(adapter->pmac_id);
2837         adapter->pmac_id = NULL;
2838
2839         be_msix_disable(adapter);
2840         return 0;
2841 }
2842
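     /* Create an interface (with untagged/broadcast/multicast flags) for
      * each VF.
      */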
2843 static int be_vfs_if_create(struct be_adapter *adapter)
2844 {
2845         struct be_vf_cfg *vf_cfg;
2846         u32 cap_flags, en_flags, vf;
2847         int status = 0;
2848
2849         cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2850                     BE_IF_FLAGS_MULTICAST;
2851
2852         for_all_vfs(adapter, vf_cfg, vf) {
2853                 if (!BE3_chip(adapter))
2854                         be_cmd_get_profile_config(adapter, &cap_flags,
2855                                                   NULL, vf + 1);
2856
2857                 /* If a FW profile exists, then cap_flags are updated */
2858                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2859                            BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2860                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2861                                           &vf_cfg->if_handle, vf + 1);
2862                 if (status)
2863                         goto err;
2864         }
2865 err:
2866         return status;
2867 }
2868
2869 static int be_vf_setup_init(struct be_adapter *adapter)
2870 {
2871         struct be_vf_cfg *vf_cfg;
2872         int vf;
2873
2874         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2875                                   GFP_KERNEL);
2876         if (!adapter->vf_cfg)
2877                 return -ENOMEM;
2878
2879         for_all_vfs(adapter, vf_cfg, vf) {
2880                 vf_cfg->if_handle = -1;
2881                 vf_cfg->pmac_id = -1;
2882         }
2883         return 0;
2884 }
2885
2886 static int be_vf_setup(struct be_adapter *adapter)
2887 {
2888         struct be_vf_cfg *vf_cfg;
2889         u16 def_vlan, lnk_speed;
2890         int status, old_vfs, vf;
2891         struct device *dev = &adapter->pdev->dev;
2892
2893         old_vfs = be_find_vfs(adapter, ENABLED);
2894         if (old_vfs) {
2895                 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2896                 if (old_vfs != num_vfs)
2897                         dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2898                 adapter->num_vfs = old_vfs;
2899         } else {
2900                 if (num_vfs > adapter->dev_num_vfs)
2901                         dev_info(dev, "Device supports only %d VFs, not %d\n",
2902                                  adapter->dev_num_vfs, num_vfs);
2903                 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
2904                 if (!adapter->num_vfs)
2905                         return 0;
2906         }
2907
2908         status = be_vf_setup_init(adapter);
2909         if (status)
2910                 goto err;
2911
2912         if (old_vfs) {
2913                 for_all_vfs(adapter, vf_cfg, vf) {
2914                         status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2915                         if (status)
2916                                 goto err;
2917                 }
2918         } else {
2919                 status = be_vfs_if_create(adapter);
2920                 if (status)
2921                         goto err;
2922         }
2923
2924         if (old_vfs) {
2925                 status = be_vfs_mac_query(adapter);
2926                 if (status)
2927                         goto err;
2928         } else {
2929                 status = be_vf_eth_addr_config(adapter);
2930                 if (status)
2931                         goto err;
2932         }
2933
2934         for_all_vfs(adapter, vf_cfg, vf) {
2935                 /* BE3 FW, by default, caps VF TX-rate to 100 Mbps.
2936                  * Allow full available bandwidth
2937                  */
2938                 if (BE3_chip(adapter) && !old_vfs)
2939                         be_cmd_set_qos(adapter, 1000, vf + 1);
2940
2941                 status = be_cmd_link_status_query(adapter, &lnk_speed,
2942                                                   NULL, vf + 1);
2943                 if (!status)
2944                         vf_cfg->tx_rate = lnk_speed;
2945
2946                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2947                                                vf + 1, vf_cfg->if_handle);
2948                 if (status)
2949                         goto err;
2950                 vf_cfg->def_vid = def_vlan;
2951
2952                 be_cmd_enable_vf(adapter, vf + 1);
2953         }
2954
2955         if (!old_vfs) {
2956                 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2957                 if (status) {
2958                         dev_err(dev, "SRIOV enable failed\n");
2959                         adapter->num_vfs = 0;
2960                         goto err;
2961                 }
2962         }
2963         return 0;
2964 err:
2965         dev_err(dev, "VF setup failed\n");
2966         be_vf_clear(adapter);
2967         return status;
2968 }
2969
2970 static void be_setup_init(struct be_adapter *adapter)
2971 {
2972         adapter->vlan_prio_bmap = 0xff;
2973         adapter->phy.link_speed = -1;
2974         adapter->if_handle = -1;
2975         adapter->be3_native = false;
2976         adapter->promiscuous = false;
2977         if (be_physfn(adapter))
2978                 adapter->cmd_privileges = MAX_PRIVILEGES;
2979         else
2980                 adapter->cmd_privileges = MIN_PRIVILEGES;
2981 }
2982
2983 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2984                            bool *active_mac, u32 *pmac_id)
2985 {
2986         int status = 0;
2987
2988         if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2989                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2990                 if (!lancer_chip(adapter) && !be_physfn(adapter))
2991                         *active_mac = true;
2992                 else
2993                         *active_mac = false;
2994
2995                 return status;
2996         }
2997
2998         if (lancer_chip(adapter)) {
2999                 status = be_cmd_get_mac_from_list(adapter, mac,
3000                                                   active_mac, pmac_id, 0);
3001                 if (*active_mac) {
3002                         status = be_cmd_mac_addr_query(adapter, mac, false,
3003                                                        if_handle, *pmac_id);
3004                 }
3005         } else if (be_physfn(adapter)) {
3006                 /* For BE3, for PF get permanent MAC */
3007                 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
3008                 *active_mac = false;
3009         } else {
3010                 /* For BE3, for VF get soft MAC assigned by PF*/
3011                 status = be_cmd_mac_addr_query(adapter, mac, false,
3012                                                if_handle, 0);
3013                 *active_mac = true;
3014         }
3015         return status;
3016 }
3017
3018 static void be_get_resources(struct be_adapter *adapter)
3019 {
3020         u16 dev_num_vfs;
3021         int pos, status;
3022         bool profile_present = false;
3023         u16 txq_count = 0;
3024
3025         if (!BEx_chip(adapter)) {
3026                 status = be_cmd_get_func_config(adapter);
3027                 if (!status)
3028                         profile_present = true;
3029         } else if (BE3_chip(adapter) && be_physfn(adapter)) {
3030                 be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
3031         }
3032
3033         if (profile_present) {
3034                 /* Sanity fixes for Lancer */
3035                 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
3036                                               BE_UC_PMAC_COUNT);
3037                 adapter->max_vlans = min_t(u16, adapter->max_vlans,
3038                                            BE_NUM_VLANS_SUPPORTED);
3039                 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
3040                                                BE_MAX_MC);
3041                 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3042                                                MAX_TX_QS);
3043                 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
3044                                                 BE3_MAX_RSS_QS);
3045                 adapter->max_event_queues = min_t(u16,
3046                                                   adapter->max_event_queues,
3047                                                   BE3_MAX_RSS_QS);
3048
3049                 if (adapter->max_rss_queues &&
3050                     adapter->max_rss_queues == adapter->max_rx_queues)
3051                         adapter->max_rss_queues -= 1;
3052
3053                 if (adapter->max_event_queues < adapter->max_rss_queues)
3054                         adapter->max_rss_queues = adapter->max_event_queues;
3055
3056         } else {
3057                 if (be_physfn(adapter))
3058                         adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3059                 else
3060                         adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3061
3062                 if (adapter->function_mode & FLEX10_MODE)
3063                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3064                 else
3065                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3066
3067                 adapter->max_mcast_mac = BE_MAX_MC;
3068                 adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
3069                 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3070                                                MAX_TX_QS);
3071                 adapter->max_rss_queues = (adapter->be3_native) ?
3072                                            BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3073                 adapter->max_event_queues = BE3_MAX_RSS_QS;
3074
3075                 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
3076                                         BE_IF_FLAGS_BROADCAST |
3077                                         BE_IF_FLAGS_MULTICAST |
3078                                         BE_IF_FLAGS_PASS_L3L4_ERRORS |
3079                                         BE_IF_FLAGS_MCAST_PROMISCUOUS |
3080                                         BE_IF_FLAGS_VLAN_PROMISCUOUS |
3081                                         BE_IF_FLAGS_PROMISCUOUS;
3082
3083                 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3084                         adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
3085         }
3086
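        /* TotalVFs from the PCIe SR-IOV capability is the HW upper
         * bound on VFs; BE3 is further clamped to the driver's MAX_VFS.
         */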
3087         pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
3088         if (pos) {
3089                 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
3090                                      &dev_num_vfs);
3091                 if (BE3_chip(adapter))
3092                         dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
3093                 adapter->dev_num_vfs = dev_num_vfs;
3094         }
3095 }
3096
3097 /* Routine to query per function resource limits */
3098 static int be_get_config(struct be_adapter *adapter)
3099 {
3100         int status;
3101
3102         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3103                                      &adapter->function_mode,
3104                                      &adapter->function_caps,
3105                                      &adapter->asic_rev);
3106         if (status)
3107                 goto err;
3108
3109         be_get_resources(adapter);
3110
3111         /* primary mac needs 1 pmac entry */
3112         adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3113                                    sizeof(u32), GFP_KERNEL);
3114         if (!adapter->pmac_id) {
3115                 status = -ENOMEM;
3116                 goto err;
3117         }
3118
3119 err:
3120         return status;
3121 }
3122
3123 static int be_setup(struct be_adapter *adapter)
3124 {
3125         struct device *dev = &adapter->pdev->dev;
3126         u32 en_flags;
3127         u32 tx_fc, rx_fc;
3128         int status;
3129         u8 mac[ETH_ALEN];
3130         bool active_mac;
3131
3132         be_setup_init(adapter);
3133
3134         if (!lancer_chip(adapter))
3135                 be_cmd_req_native_mode(adapter);
3136
3137         status = be_get_config(adapter);
3138         if (status)
3139                 goto err;
3140
3141         status = be_msix_enable(adapter);
3142         if (status)
3143                 goto err;
3144
3145         status = be_evt_queues_create(adapter);
3146         if (status)
3147                 goto err;
3148
3149         status = be_tx_cqs_create(adapter);
3150         if (status)
3151                 goto err;
3152
3153         status = be_rx_cqs_create(adapter);
3154         if (status)
3155                 goto err;
3156
3157         status = be_mcc_queues_create(adapter);
3158         if (status)
3159                 goto err;
3160
3161         be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3162         /* In UMC mode FW does not return right privileges.
3163          * Override with correct privilege equivalent to PF.
3164          */
3165         if (be_is_mc(adapter))
3166                 adapter->cmd_privileges = MAX_PRIVILEGES;
3167
3168         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3169                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3170
3171         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3172                 en_flags |= BE_IF_FLAGS_RSS;
3173
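        /* An interface may only enable flags that the function actually
         * supports, so mask the requested set against if_cap_flags.
         */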
3174         en_flags &= adapter->if_cap_flags;
3175
3176         status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
3177                                   &adapter->if_handle, 0);
3178         if (status != 0)
3179                 goto err;
3180
3181         memset(mac, 0, ETH_ALEN);
3182         active_mac = false;
3183         status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3184                                  &active_mac, &adapter->pmac_id[0]);
3185         if (status != 0)
3186                 goto err;
3187
3188         if (!active_mac) {
3189                 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3190                                          &adapter->pmac_id[0], 0);
3191                 if (status != 0)
3192                         goto err;
3193         }
3194
3195         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3196                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3197                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3198         }
3199
3200         status = be_tx_qs_create(adapter);
3201         if (status)
3202                 goto err;
3203
3204         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
3205
3206         if (adapter->vlans_added)
3207                 be_vid_config(adapter);
3208
3209         be_set_rx_mode(adapter->netdev);
3210
3211         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3212
3213         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3214                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3215                                         adapter->rx_fc);
3216
3217         if (be_physfn(adapter)) {
3218                 if (adapter->dev_num_vfs)
3219                         be_vf_setup(adapter);
3220                 else
3221                         dev_warn(dev, "device doesn't support SRIOV\n");
3222         }
3223
3224         status = be_cmd_get_phy_info(adapter);
3225         if (!status && be_pause_supported(adapter))
3226                 adapter->phy.fc_autoneg = 1;
3227
3228         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3229         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3230         return 0;
3231 err:
3232         be_clear(adapter);
3233         return status;
3234 }
3235
3236 #ifdef CONFIG_NET_POLL_CONTROLLER
3237 static void be_netpoll(struct net_device *netdev)
3238 {
3239         struct be_adapter *adapter = netdev_priv(netdev);
3240         struct be_eq_obj *eqo;
3241         int i;
3242
3243         for_all_evt_queues(adapter, eqo, i) {
3244                 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3245                 napi_schedule(&eqo->napi);
3246         }
3249 }
3250 #endif
3251
3252 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
3253 static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
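/* The two 16-byte rows above form the 32-byte section-directory cookie;
 * the first row is NUL-padded while the second fills all 16 bytes, so
 * the cookie is matched with memcmp() in get_fsec_info() rather than
 * with string routines.
 */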
3254
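/* Decide whether the boot (redboot) image needs reflashing: the CRC of
 * the image currently in flash is compared with the last four bytes of
 * the new image; identical CRCs mean the flash step can be skipped.
 */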
3255 static bool be_flash_redboot(struct be_adapter *adapter,
3256                         const u8 *p, u32 img_start, int image_size,
3257                         int hdr_size)
3258 {
3259         u32 crc_offset;
3260         u8 flashed_crc[4];
3261         int status;
3262
3263         crc_offset = hdr_size + img_start + image_size - 4;
3264
3265         p += crc_offset;
3266
3267         status = be_cmd_get_flash_crc(adapter, flashed_crc,
3268                         (image_size - 4));
3269         if (status) {
3270                 dev_err(&adapter->pdev->dev,
3271                         "could not get CRC from flash, not flashing redboot\n");
3272                 return false;
3273         }
3274
3275         /* Update redboot only if the CRC does not match */
3276         return memcmp(flashed_crc, p, 4) != 0;
3280 }
3281
3282 static bool phy_flashing_required(struct be_adapter *adapter)
3283 {
3284         return (adapter->phy.phy_type == TN_8022 &&
3285                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3286 }
3287
3288 static bool is_comp_in_ufi(struct be_adapter *adapter,
3289                            struct flash_section_info *fsec, int type)
3290 {
3291         int i = 0, img_type = 0;
3292         struct flash_section_info_g2 *fsec_g2 = NULL;
3293
3294         if (BE2_chip(adapter))
3295                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3296
3297         for (i = 0; i < MAX_FLASH_COMP; i++) {
3298                 if (fsec_g2)
3299                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3300                 else
3301                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3302
3303                 if (img_type == type)
3304                         return true;
3305         }
3306         return false;
3307
3308 }
3309
3310 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3311                                                 int header_size,
3312                                                 const struct firmware *fw)
3313 {
3314         struct flash_section_info *fsec = NULL;
3315         const u8 *p = fw->data;
3316
3317         p += header_size;
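        /* Walk the UFI in 32-byte steps until the flash cookie marking
         * the start of the section directory is found.
         */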
3318         while (p < (fw->data + fw->size)) {
3319                 fsec = (struct flash_section_info *)p;
3320                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3321                         return fsec;
3322                 p += 32;
3323         }
3324         return NULL;
3325 }
3326
3327 static int be_flash(struct be_adapter *adapter, const u8 *img,
3328                 struct be_dma_mem *flash_cmd, int optype, int img_size)
3329 {
3330         u32 total_bytes = 0, flash_op, num_bytes = 0;
3331         int status = 0;
3332         struct be_cmd_write_flashrom *req = flash_cmd->va;
3333
3334         total_bytes = img_size;
3335         while (total_bytes) {
3336                 num_bytes = min_t(u32, 32*1024, total_bytes);
3337
3338                 total_bytes -= num_bytes;
3339
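                /* All chunks except the last are staged with a SAVE op;
                 * the final chunk issues the FLASH op that commits the
                 * accumulated image to flash.
                 */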
3340                 if (!total_bytes) {
3341                         if (optype == OPTYPE_PHY_FW)
3342                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3343                         else
3344                                 flash_op = FLASHROM_OPER_FLASH;
3345                 } else {
3346                         if (optype == OPTYPE_PHY_FW)
3347                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3348                         else
3349                                 flash_op = FLASHROM_OPER_SAVE;
3350                 }
3351
3352                 memcpy(req->data_buf, img, num_bytes);
3353                 img += num_bytes;
3354                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3355                                                 flash_op, num_bytes);
3356                 if (status) {
3357                         if (status == ILLEGAL_IOCTL_REQ &&
3358                             optype == OPTYPE_PHY_FW)
3359                                 break;
3360                         dev_err(&adapter->pdev->dev,
3361                                 "cmd to write to flash rom failed.\n");
3362                         return status;
3363                 }
3364         }
3365         return 0;
3366 }
3367
3368 /* For BE2, BE3 and BE3-R */
3369 static int be_flash_BEx(struct be_adapter *adapter,
3370                          const struct firmware *fw,
3371                          struct be_dma_mem *flash_cmd,
3372                          int num_of_images)
3374 {
3375         int status = 0, i, filehdr_size = 0;
3376         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3377         const u8 *p = fw->data;
3378         const struct flash_comp *pflashcomp;
3379         int num_comp, redboot;
3380         struct flash_section_info *fsec = NULL;
3381
3382         struct flash_comp gen3_flash_types[] = {
3383                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3384                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3385                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3386                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3387                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3388                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3389                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3390                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3391                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3392                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3393                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3394                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3395                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3396                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3397                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3398                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3399                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3400                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3401                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3402                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3403         };
3404
3405         struct flash_comp gen2_flash_types[] = {
3406                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3407                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3408                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3409                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3410                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3411                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3412                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3413                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3414                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3415                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3416                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3417                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3418                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3419                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3420                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3421                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3422         };
3423
3424         if (BE3_chip(adapter)) {
3425                 pflashcomp = gen3_flash_types;
3426                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3427                 num_comp = ARRAY_SIZE(gen3_flash_types);
3428         } else {
3429                 pflashcomp = gen2_flash_types;
3430                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3431                 num_comp = ARRAY_SIZE(gen2_flash_types);
3432         }
3433
3434         /* Get flash section info*/
3435         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3436         if (!fsec) {
3437                 dev_err(&adapter->pdev->dev,
3438                         "Invalid cookie. UFI corrupted?\n");
3439                 return -1;
3440         }
3441         for (i = 0; i < num_comp; i++) {
3442                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3443                         continue;
3444
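                /* Skip the NC-SI image if the FW currently running on
                 * the adapter is older than 3.102.148.0.
                 */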
3445                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3446                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3447                         continue;
3448
3449                 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3450                     !phy_flashing_required(adapter))
3451                         continue;
3452
3453                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3454                         redboot = be_flash_redboot(adapter, fw->data,
3455                                 pflashcomp[i].offset, pflashcomp[i].size,
3456                                 filehdr_size + img_hdrs_size);
3457                         if (!redboot)
3458                                 continue;
3459                 }
3460
3461                 p = fw->data;
3462                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3463                 if (p + pflashcomp[i].size > fw->data + fw->size)
3464                         return -1;
3465
3466                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3467                                         pflashcomp[i].size);
3468                 if (status) {
3469                         dev_err(&adapter->pdev->dev,
3470                                 "Flashing section type %d failed.\n",
3471                                 pflashcomp[i].img_type);
3472                         return status;
3473                 }
3474         }
3475         return 0;
3476 }
3477
3478 static int be_flash_skyhawk(struct be_adapter *adapter,
3479                 const struct firmware *fw,
3480                 struct be_dma_mem *flash_cmd, int num_of_images)
3481 {
3482         int status = 0, i, filehdr_size = 0;
3483         int img_offset, img_size, img_optype, redboot;
3484         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3485         const u8 *p = fw->data;
3486         struct flash_section_info *fsec = NULL;
3487
3488         filehdr_size = sizeof(struct flash_file_hdr_g3);
3489         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3490         if (!fsec) {
3491                 dev_err(&adapter->pdev->dev,
3492                         "Invalid cookie. UFI corrupted?\n");
3493                 return -1;
3494         }
3495
3496         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3497                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3498                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3499
3500                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3501                 case IMAGE_FIRMWARE_iSCSI:
3502                         img_optype = OPTYPE_ISCSI_ACTIVE;
3503                         break;
3504                 case IMAGE_BOOT_CODE:
3505                         img_optype = OPTYPE_REDBOOT;
3506                         break;
3507                 case IMAGE_OPTION_ROM_ISCSI:
3508                         img_optype = OPTYPE_BIOS;
3509                         break;
3510                 case IMAGE_OPTION_ROM_PXE:
3511                         img_optype = OPTYPE_PXE_BIOS;
3512                         break;
3513                 case IMAGE_OPTION_ROM_FCoE:
3514                         img_optype = OPTYPE_FCOE_BIOS;
3515                         break;
3516                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3517                         img_optype = OPTYPE_ISCSI_BACKUP;
3518                         break;
3519                 case IMAGE_NCSI:
3520                         img_optype = OPTYPE_NCSI_FW;
3521                         break;
3522                 default:
3523                         continue;
3524                 }
3525
3526                 if (img_optype == OPTYPE_REDBOOT) {
3527                         redboot = be_flash_redboot(adapter, fw->data,
3528                                         img_offset, img_size,
3529                                         filehdr_size + img_hdrs_size);
3530                         if (!redboot)
3531                                 continue;
3532                 }
3533
3534                 p = fw->data;
3535                 p += filehdr_size + img_offset + img_hdrs_size;
3536                 if (p + img_size > fw->data + fw->size)
3537                         return -1;
3538
3539                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3540                 if (status) {
3541                         dev_err(&adapter->pdev->dev,
3542                                 "Flashing section type %d failed.\n",
3543                                 le32_to_cpu(fsec->fsec_entry[i].type));
3544                         return status;
3545                 }
3546         }
3547         return 0;
3548 }
3549
3550 static int lancer_wait_idle(struct be_adapter *adapter)
3551 {
3552 #define SLIPORT_IDLE_TIMEOUT 30
3553         u32 reg_val;
3554         int status = 0, i;
3555
3556         for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3557                 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3558                 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3559                         break;
3560
3561                 ssleep(1);
3562         }
3563
3564         if (i == SLIPORT_IDLE_TIMEOUT)
3565                 status = -1;
3566
3567         return status;
3568 }
3569
3570 static int lancer_fw_reset(struct be_adapter *adapter)
3571 {
3572         int status = 0;
3573
3574         status = lancer_wait_idle(adapter);
3575         if (status)
3576                 return status;
3577
3578         iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3579                   PHYSDEV_CONTROL_OFFSET);
3580
3581         return status;
3582 }
3583
3584 static int lancer_fw_download(struct be_adapter *adapter,
3585                                 const struct firmware *fw)
3586 {
3587 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3588 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
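        /* The image is streamed to the "/prg" object in 32KB chunks;
         * the FW reports how many bytes it consumed per write, which
         * advances the offset for the next chunk. A final zero-length
         * write commits the image.
         */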
3589         struct be_dma_mem flash_cmd;
3590         const u8 *data_ptr = NULL;
3591         u8 *dest_image_ptr = NULL;
3592         size_t image_size = 0;
3593         u32 chunk_size = 0;
3594         u32 data_written = 0;
3595         u32 offset = 0;
3596         int status = 0;
3597         u8 add_status = 0;
3598         u8 change_status;
3599
3600         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3601                 dev_err(&adapter->pdev->dev,
3602                         "FW image not properly aligned. Length must be 4-byte aligned.\n");
3604                 status = -EINVAL;
3605                 goto lancer_fw_exit;
3606         }
3607
3608         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3609                                 + LANCER_FW_DOWNLOAD_CHUNK;
3610         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3611                                           &flash_cmd.dma, GFP_KERNEL);
3612         if (!flash_cmd.va) {
3613                 status = -ENOMEM;
3614                 goto lancer_fw_exit;
3615         }
3616
3617         dest_image_ptr = flash_cmd.va +
3618                                 sizeof(struct lancer_cmd_req_write_object);
3619         image_size = fw->size;
3620         data_ptr = fw->data;
3621
3622         while (image_size) {
3623                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3624
3625                 /* Copy the image chunk content. */
3626                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3627
3628                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3629                                                  chunk_size, offset,
3630                                                  LANCER_FW_DOWNLOAD_LOCATION,
3631                                                  &data_written, &change_status,
3632                                                  &add_status);
3633                 if (status)
3634                         break;
3635
3636                 offset += data_written;
3637                 data_ptr += data_written;
3638                 image_size -= data_written;
3639         }
3640
3641         if (!status) {
3642                 /* Commit the FW written */
3643                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3644                                                  0, offset,
3645                                                  LANCER_FW_DOWNLOAD_LOCATION,
3646                                                  &data_written, &change_status,
3647                                                  &add_status);
3648         }
3649
3650         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3651                                 flash_cmd.dma);
3652         if (status) {
3653                 dev_err(&adapter->pdev->dev,
3654                         "Firmware load error. Status code: 0x%x Additional Status: 0x%x\n",
3655                         status, add_status);
3657                 goto lancer_fw_exit;
3658         }
3659
3660         if (change_status == LANCER_FW_RESET_NEEDED) {
3661                 status = lancer_fw_reset(adapter);
3662                 if (status) {
3663                         dev_err(&adapter->pdev->dev,
3664                                 "Adapter busy for FW reset.\n"
3665                                 "New FW will not be active.\n");
3666                         goto lancer_fw_exit;
3667                 }
3668         } else if (change_status != LANCER_NO_RESET_NEEDED) {
3669                 dev_err(&adapter->pdev->dev,
3670                         "System reboot required for new FW to be active\n");
3671         }
3673
3674         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3675 lancer_fw_exit:
3676         return status;
3677 }
3678
3679 #define UFI_TYPE2               2
3680 #define UFI_TYPE3               3
3681 #define UFI_TYPE3R              10
3682 #define UFI_TYPE4               4
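/* The UFI header's build[0] character encodes the target ASIC
 * generation ('2' = BE2, '3' = BE3/BE3-R, '4' = Skyhawk); BE3-R images
 * are further distinguished by asic_type_rev 0x10.
 */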
3683 static int be_get_ufi_type(struct be_adapter *adapter,
3684                            struct flash_file_hdr_g3 *fhdr)
3685 {
3686         if (fhdr == NULL)
3687                 goto be_get_ufi_exit;
3688
3689         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3690                 return UFI_TYPE4;
3691         else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3692                 if (fhdr->asic_type_rev == 0x10)
3693                         return UFI_TYPE3R;
3694                 else
3695                         return UFI_TYPE3;
3696         } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3697                 return UFI_TYPE2;
3698
3699 be_get_ufi_exit:
3700         dev_err(&adapter->pdev->dev,
3701                 "UFI and Interface are not compatible for flashing\n");
3702         return -1;
3703 }
3704
3705 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3706 {
3707         struct flash_file_hdr_g3 *fhdr3;
3708         struct image_hdr *img_hdr_ptr = NULL;
3709         struct be_dma_mem flash_cmd;
3710         const u8 *p;
3711         int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3712
3713         flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3714         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3715                                           &flash_cmd.dma, GFP_KERNEL);
3716         if (!flash_cmd.va) {
3717                 status = -ENOMEM;
3718                 goto be_fw_exit;
3719         }
3720
3721         p = fw->data;
3722         fhdr3 = (struct flash_file_hdr_g3 *)p;
3723
3724         ufi_type = be_get_ufi_type(adapter, fhdr3);
3725
3726         num_imgs = le32_to_cpu(fhdr3->num_imgs);
3727         for (i = 0; i < num_imgs; i++) {
3728                 img_hdr_ptr = (struct image_hdr *)(fw->data +
3729                                 (sizeof(struct flash_file_hdr_g3) +
3730                                  i * sizeof(struct image_hdr)));
3731                 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3732                         switch (ufi_type) {
3733                         case UFI_TYPE4:
3734                                 status = be_flash_skyhawk(adapter, fw,
3735                                                         &flash_cmd, num_imgs);
3736                                 break;
3737                         case UFI_TYPE3R:
3738                                 status = be_flash_BEx(adapter, fw, &flash_cmd,
3739                                                       num_imgs);
3740                                 break;
3741                         case UFI_TYPE3:
3742                                 /* Do not flash this ufi on BE3-R cards */
3743                                 if (adapter->asic_rev < 0x10)
3744                                         status = be_flash_BEx(adapter, fw,
3745                                                               &flash_cmd,
3746                                                               num_imgs);
3747                                 else {
3748                                         status = -1;
3749                                         dev_err(&adapter->pdev->dev,
3750                                                 "Can't load BE3 UFI on BE3R\n");
3751                                 }
3752                         }
3753                 }
3754         }
3755
3756         if (ufi_type == UFI_TYPE2)
3757                 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3758         else if (ufi_type == -1)
3759                 status = -1;
3760
3761         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3762                           flash_cmd.dma);
3763         if (status) {
3764                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3765                 goto be_fw_exit;
3766         }
3767
3768         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3769
3770 be_fw_exit:
3771         return status;
3772 }
3773
3774 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3775 {
3776         const struct firmware *fw;
3777         int status;
3778
3779         if (!netif_running(adapter->netdev)) {
3780                 dev_err(&adapter->pdev->dev,
3781                         "Firmware load not allowed (interface is down)\n");
3782                 return -1;
3783         }
3784
3785         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3786         if (status)
3787                 goto fw_exit;
3788
3789         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3790
3791         if (lancer_chip(adapter))
3792                 status = lancer_fw_download(adapter, fw);
3793         else
3794                 status = be_fw_download(adapter, fw);
3795
3796 fw_exit:
3797         release_firmware(fw);
3798         return status;
3799 }
3800
3801 static const struct net_device_ops be_netdev_ops = {
3802         .ndo_open               = be_open,
3803         .ndo_stop               = be_close,
3804         .ndo_start_xmit         = be_xmit,
3805         .ndo_set_rx_mode        = be_set_rx_mode,
3806         .ndo_set_mac_address    = be_mac_addr_set,
3807         .ndo_change_mtu         = be_change_mtu,
3808         .ndo_get_stats64        = be_get_stats64,
3809         .ndo_validate_addr      = eth_validate_addr,
3810         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3811         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3812         .ndo_set_vf_mac         = be_set_vf_mac,
3813         .ndo_set_vf_vlan        = be_set_vf_vlan,
3814         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3815         .ndo_get_vf_config      = be_get_vf_config,
3816 #ifdef CONFIG_NET_POLL_CONTROLLER
3817         .ndo_poll_controller    = be_netpoll,
3818 #endif
3819 };
3820
3821 static void be_netdev_init(struct net_device *netdev)
3822 {
3823         struct be_adapter *adapter = netdev_priv(netdev);
3824         struct be_eq_obj *eqo;
3825         int i;
3826
3827         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3828                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3829                 NETIF_F_HW_VLAN_CTAG_TX;
3830         if (be_multi_rxq(adapter))
3831                 netdev->hw_features |= NETIF_F_RXHASH;
3832
3833         netdev->features |= netdev->hw_features |
3834                 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
3835
3836         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3837                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3838
3839         netdev->priv_flags |= IFF_UNICAST_FLT;
3840
3841         netdev->flags |= IFF_MULTICAST;
3842
3843         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3844
3845         netdev->netdev_ops = &be_netdev_ops;
3846
3847         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3848
3849         for_all_evt_queues(adapter, eqo, i)
3850                 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3851 }
3852
3853 static void be_unmap_pci_bars(struct be_adapter *adapter)
3854 {
3855         if (adapter->csr)
3856                 pci_iounmap(adapter->pdev, adapter->csr);
3857         if (adapter->db)
3858                 pci_iounmap(adapter->pdev, adapter->db);
3859 }
3860
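/* Doorbell BAR selection: Lancer and VFs expose doorbells in BAR 0,
 * while BE2/BE3/Skyhawk PFs use BAR 4.
 */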
3861 static int db_bar(struct be_adapter *adapter)
3862 {
3863         if (lancer_chip(adapter) || !be_physfn(adapter))
3864                 return 0;
3865         else
3866                 return 4;
3867 }
3868
3869 static int be_roce_map_pci_bars(struct be_adapter *adapter)
3870 {
3871         if (skyhawk_chip(adapter)) {
3872                 adapter->roce_db.size = 4096;
3873                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3874                                                               db_bar(adapter));
3875                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3876                                                                db_bar(adapter));
3877         }
3878         return 0;
3879 }
3880
3881 static int be_map_pci_bars(struct be_adapter *adapter)
3882 {
3883         u8 __iomem *addr;
3884         u32 sli_intf;
3885
3886         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3887         adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3888                                 SLI_INTF_IF_TYPE_SHIFT;
3889
3890         if (BEx_chip(adapter) && be_physfn(adapter)) {
3891                 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3892                 if (adapter->csr == NULL)
3893                         return -ENOMEM;
3894         }
3895
3896         addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
3897         if (addr == NULL)
3898                 goto pci_map_err;
3899         adapter->db = addr;
3900
3901         be_roce_map_pci_bars(adapter);
3902         return 0;
3903
3904 pci_map_err:
3905         be_unmap_pci_bars(adapter);
3906         return -ENOMEM;
3907 }
3908
3909 static void be_ctrl_cleanup(struct be_adapter *adapter)
3910 {
3911         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3912
3913         be_unmap_pci_bars(adapter);
3914
3915         if (mem->va)
3916                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3917                                   mem->dma);
3918
3919         mem = &adapter->rx_filter;
3920         if (mem->va)
3921                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3922                                   mem->dma);
3923 }
3924
3925 static int be_ctrl_init(struct be_adapter *adapter)
3926 {
3927         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3928         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3929         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3930         u32 sli_intf;
3931         int status;
3932
3933         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3934         adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3935                                  SLI_INTF_FAMILY_SHIFT;
3936         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3937
3938         status = be_map_pci_bars(adapter);
3939         if (status)
3940                 goto done;
3941
3942         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3943         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3944                                                 mbox_mem_alloc->size,
3945                                                 &mbox_mem_alloc->dma,
3946                                                 GFP_KERNEL);
3947         if (!mbox_mem_alloc->va) {
3948                 status = -ENOMEM;
3949                 goto unmap_pci_bars;
3950         }
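        /* The mailbox must be 16-byte aligned: 16 extra bytes were
         * allocated above, and both the CPU and DMA addresses are
         * rounded up with PTR_ALIGN.
         */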
3951         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3952         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3953         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3954         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3955
3956         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3957         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3958                                            &rx_filter->dma,
3959                                            GFP_KERNEL | __GFP_ZERO);
3960         if (rx_filter->va == NULL) {
3961                 status = -ENOMEM;
3962                 goto free_mbox;
3963         }
3964
3965         mutex_init(&adapter->mbox_lock);
3966         spin_lock_init(&adapter->mcc_lock);
3967         spin_lock_init(&adapter->mcc_cq_lock);
3968
3969         init_completion(&adapter->flash_compl);
3970         pci_save_state(adapter->pdev);
3971         return 0;
3972
3973 free_mbox:
3974         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3975                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3976
3977 unmap_pci_bars:
3978         be_unmap_pci_bars(adapter);
3979
3980 done:
3981         return status;
3982 }
3983
3984 static void be_stats_cleanup(struct be_adapter *adapter)
3985 {
3986         struct be_dma_mem *cmd = &adapter->stats_cmd;
3987
3988         if (cmd->va)
3989                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3990                                   cmd->va, cmd->dma);
3991 }
3992
3993 static int be_stats_init(struct be_adapter *adapter)
3994 {
3995         struct be_dma_mem *cmd = &adapter->stats_cmd;
3996
3997         if (lancer_chip(adapter))
3998                 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3999         else if (BE2_chip(adapter))
4000                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
4001         else
4002                 /* BE3 and Skyhawk */
4003                 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
4004
4005         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4006                                      GFP_KERNEL | __GFP_ZERO);
4007         if (cmd->va == NULL)
4008                 return -ENOMEM;
4009         return 0;
4010 }
4011
4012 static void be_remove(struct pci_dev *pdev)
4013 {
4014         struct be_adapter *adapter = pci_get_drvdata(pdev);
4015
4016         if (!adapter)
4017                 return;
4018
4019         be_roce_dev_remove(adapter);
4020         be_intr_set(adapter, false);
4021
4022         cancel_delayed_work_sync(&adapter->func_recovery_work);
4023
4024         unregister_netdev(adapter->netdev);
4025
4026         be_clear(adapter);
4027
4028         /* tell fw we're done with firing cmds */
4029         be_cmd_fw_clean(adapter);
4030
4031         be_stats_cleanup(adapter);
4032
4033         be_ctrl_cleanup(adapter);
4034
4035         pci_disable_pcie_error_reporting(pdev);
4036
4037         pci_set_drvdata(pdev, NULL);
4038         pci_release_regions(pdev);
4039         pci_disable_device(pdev);
4040
4041         free_netdev(adapter->netdev);
4042 }
4043
4044 bool be_is_wol_supported(struct be_adapter *adapter)
4045 {
4046         return (adapter->wol_cap & BE_WOL_CAP) &&
4047                !be_is_wol_excluded(adapter);
4048 }
4049
4050 u32 be_get_fw_log_level(struct be_adapter *adapter)
4051 {
4052         struct be_dma_mem extfat_cmd;
4053         struct be_fat_conf_params *cfgs;
4054         int status;
4055         u32 level = 0;
4056         int j;
4057
4058         if (lancer_chip(adapter))
4059                 return 0;
4060
4061         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4062         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4063         extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
4064                                              &extfat_cmd.dma);
4065
4066         if (!extfat_cmd.va) {
4067                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4068                         __func__);
4069                 goto err;
4070         }
4071
4072         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4073         if (!status) {
4074                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4075                                                 sizeof(struct be_cmd_resp_hdr));
4076                 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
4077                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4078                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4079                 }
4080         }
4081         pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
4082                             extfat_cmd.dma);
4083 err:
4084         return level;
4085 }
4086
4087 static int be_get_initial_config(struct be_adapter *adapter)
4088 {
4089         int status;
4090         u32 level;
4091
4092         status = be_cmd_get_cntl_attributes(adapter);
4093         if (status)
4094                 return status;
4095
4096         status = be_cmd_get_acpi_wol_cap(adapter);
4097         if (status) {
4098                 /* in case of a failure to get WOL capabilities
4099                  * check the exclusion list to determine WOL capability */
4100                 if (!be_is_wol_excluded(adapter))
4101                         adapter->wol_cap |= BE_WOL_CAP;
4102         }
4103
4104         if (be_is_wol_supported(adapter))
4105                 adapter->wol = true;
4106
4107         /* Must be a power of 2 or else MODULO will BUG_ON */
4108         adapter->be_get_temp_freq = 64;
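        /* be_worker() issues be_cmd_get_die_temperature() whenever
         * MODULO(work_counter, be_get_temp_freq) == 0; per the comment
         * above, MODULO() presumably masks with (limit - 1) and traps
         * on non-power-of-2 limits, hence 64 = 2^6.
         */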
4109
4110         level = be_get_fw_log_level(adapter);
4111         adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4112
4113         return 0;
4114 }
4115
4116 static int lancer_recover_func(struct be_adapter *adapter)
4117 {
4118         int status;
4119
4120         status = lancer_test_and_set_rdy_state(adapter);
4121         if (status)
4122                 goto err;
4123
4124         if (netif_running(adapter->netdev))
4125                 be_close(adapter->netdev);
4126
4127         be_clear(adapter);
4128
4129         adapter->hw_error = false;
4130         adapter->fw_timeout = false;
4131
4132         status = be_setup(adapter);
4133         if (status)
4134                 goto err;
4135
4136         if (netif_running(adapter->netdev)) {
4137                 status = be_open(adapter->netdev);
4138                 if (status)
4139                         goto err;
4140         }
4141
4142         dev_info(&adapter->pdev->dev,
4143                  "Adapter SLIPORT recovery succeeded\n");
4144         return 0;
4145 err:
4146         if (adapter->eeh_error)
4147                 dev_err(&adapter->pdev->dev,
4148                         "Adapter SLIPORT recovery failed\n");
4149
4150         return status;
4151 }
4152
4153 static void be_func_recovery_task(struct work_struct *work)
4154 {
4155         struct be_adapter *adapter =
4156                 container_of(work, struct be_adapter, func_recovery_work.work);
4157         int status;
4158
4159         be_detect_error(adapter);
4160
4161         if (adapter->hw_error && lancer_chip(adapter)) {
4163                 if (adapter->eeh_error)
4164                         goto out;
4165
4166                 rtnl_lock();
4167                 netif_device_detach(adapter->netdev);
4168                 rtnl_unlock();
4169
4170                 status = lancer_recover_func(adapter);
4171
4172                 if (!status)
4173                         netif_device_attach(adapter->netdev);
4174         }
4175
4176 out:
4177         schedule_delayed_work(&adapter->func_recovery_work,
4178                               msecs_to_jiffies(1000));
4179 }
4180
4181 static void be_worker(struct work_struct *work)
4182 {
4183         struct be_adapter *adapter =
4184                 container_of(work, struct be_adapter, work.work);
4185         struct be_rx_obj *rxo;
4186         struct be_eq_obj *eqo;
4187         int i;
4188
4189         /* when interrupts are not yet enabled, just reap any pending
4190          * mcc completions */
4191         if (!netif_running(adapter->netdev)) {
4192                 local_bh_disable();
4193                 be_process_mcc(adapter);
4194                 local_bh_enable();
4195                 goto reschedule;
4196         }
4197
4198         if (!adapter->stats_cmd_sent) {
4199                 if (lancer_chip(adapter))
4200                         lancer_cmd_get_pport_stats(adapter,
4201                                                 &adapter->stats_cmd);
4202                 else
4203                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
4204         }
4205
4206         if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4207                 be_cmd_get_die_temperature(adapter);
4208
4209         for_all_rx_queues(adapter, rxo, i) {
4210                 if (rxo->rx_post_starved) {
4211                         rxo->rx_post_starved = false;
4212                         be_post_rx_frags(rxo, GFP_KERNEL);
4213                 }
4214         }
4215
4216         for_all_evt_queues(adapter, eqo, i)
4217                 be_eqd_update(adapter, eqo);
4218
4219 reschedule:
4220         adapter->work_counter++;
4221         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4222 }
4223
4224 static bool be_reset_required(struct be_adapter *adapter)
4225 {
4226         return be_find_vfs(adapter, ENABLED) == 0;
4227 }
4228
4229 static char *mc_name(struct be_adapter *adapter)
4230 {
4231         if (adapter->function_mode & FLEX10_MODE)
4232                 return "FLEX10";
4233         else if (adapter->function_mode & VNIC_MODE)
4234                 return "vNIC";
4235         else if (adapter->function_mode & UMC_ENABLED)
4236                 return "UMC";
4237         else
4238                 return "";
4239 }
4240
4241 static inline char *func_name(struct be_adapter *adapter)
4242 {
4243         return be_physfn(adapter) ? "PF" : "VF";
4244 }
4245
4246 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4247 {
4248         int status = 0;
4249         struct be_adapter *adapter;
4250         struct net_device *netdev;
4251         char port_name;
4252
4253         status = pci_enable_device(pdev);
4254         if (status)
4255                 goto do_none;
4256
4257         status = pci_request_regions(pdev, DRV_NAME);
4258         if (status)
4259                 goto disable_dev;
4260         pci_set_master(pdev);
4261
4262         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4263         if (netdev == NULL) {
4264                 status = -ENOMEM;
4265                 goto rel_reg;
4266         }
4267         adapter = netdev_priv(netdev);
4268         adapter->pdev = pdev;
4269         pci_set_drvdata(pdev, adapter);
4270         adapter->netdev = netdev;
4271         SET_NETDEV_DEV(netdev, &pdev->dev);
4272
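        /* Prefer 64-bit DMA and advertise NETIF_F_HIGHDMA; fall back
         * to a 32-bit mask when the platform cannot provide 64-bit
         * addressing.
         */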
4273         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4274         if (!status) {
4275                 status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4276                 if (status < 0) {
4277                         dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
4278                         goto free_netdev;
4279                 }
4280                 netdev->features |= NETIF_F_HIGHDMA;
4281         } else {
4282                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4283                 if (status) {
4284                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4285                         goto free_netdev;
4286                 }
4287         }
4288
4289         status = pci_enable_pcie_error_reporting(pdev);
4290         if (status)
4291                 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
4292
4293         status = be_ctrl_init(adapter);
4294         if (status)
4295                 goto free_netdev;
4296
4297         /* sync up with fw's ready state */
4298         if (be_physfn(adapter)) {
4299                 status = be_fw_wait_ready(adapter);
4300                 if (status)
4301                         goto ctrl_clean;
4302         }
4303
4304         if (be_reset_required(adapter)) {
4305                 status = be_cmd_reset_function(adapter);
4306                 if (status)
4307                         goto ctrl_clean;
4308
4309                 /* Wait for interrupts to quiesce after an FLR */
4310                 msleep(100);
4311         }
4312
4313         /* Allow interrupts for other ULPs running on NIC function */
4314         be_intr_set(adapter, true);
4315
4316         /* tell fw we're ready to fire cmds */
4317         status = be_cmd_fw_init(adapter);
4318         if (status)
4319                 goto ctrl_clean;
4320
4321         status = be_stats_init(adapter);
4322         if (status)
4323                 goto ctrl_clean;
4324
4325         status = be_get_initial_config(adapter);
4326         if (status)
4327                 goto stats_clean;
4328
4329         INIT_DELAYED_WORK(&adapter->work, be_worker);
4330         INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4331         adapter->rx_fc = adapter->tx_fc = true;
4332
4333         status = be_setup(adapter);
4334         if (status)
4335                 goto stats_clean;
4336
4337         be_netdev_init(netdev);
4338         status = register_netdev(netdev);
4339         if (status != 0)
4340                 goto unsetup;
4341
4342         be_roce_dev_add(adapter);
4343
4344         schedule_delayed_work(&adapter->func_recovery_work,
4345                               msecs_to_jiffies(1000));
4346
4347         be_cmd_query_port_name(adapter, &port_name);
4348
4349         dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4350                  func_name(adapter), mc_name(adapter), port_name);
4351
4352         return 0;
4353
4354 unsetup:
4355         be_clear(adapter);
4356 stats_clean:
4357         be_stats_cleanup(adapter);
4358 ctrl_clean:
4359         be_ctrl_cleanup(adapter);
4360 free_netdev:
4361         free_netdev(netdev);
4362         pci_set_drvdata(pdev, NULL);
4363 rel_reg:
4364         pci_release_regions(pdev);
4365 disable_dev:
4366         pci_disable_device(pdev);
4367 do_none:
4368         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4369         return status;
4370 }
4371
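/* Legacy PM suspend hook: arm wake-on-LAN if configured, stop the
 * recovery worker, detach and close the interface, free HW resources
 * and drop the device into the requested low-power state.
 */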
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        if (adapter->wol)
                be_setup_wol(adapter, true);

        cancel_delayed_work_sync(&adapter->func_recovery_work);

        netif_device_detach(netdev);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_clear(adapter);

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}

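/* Legacy PM resume hook: mirror image of be_suspend(). Re-enable the
 * device, reinitialize FW and driver state, reopen the interface if it
 * was running and restart the recovery worker.
 */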
static int be_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        netif_device_detach(netdev);

        status = pci_enable_device(pdev);
        if (status)
                return status;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                return status;

        status = be_setup(adapter);
        if (status)
                return status;

        if (netif_running(netdev)) {
                rtnl_lock();
                be_open(netdev);
                rtnl_unlock();
        }

        schedule_delayed_work(&adapter->func_recovery_work,
                              msecs_to_jiffies(1000));
        netif_device_attach(netdev);

        if (adapter->wol)
                be_setup_wol(adapter, false);

        return 0;
}

/*
 * Shutdown hook. The function reset (FLR) issued below stops the device
 * from DMAing any further data before the system powers off or reboots.
 */
static void be_shutdown(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        if (!adapter)
                return;

        cancel_delayed_work_sync(&adapter->work);
        cancel_delayed_work_sync(&adapter->func_recovery_work);

        netif_device_detach(adapter->netdev);

        be_cmd_reset_function(adapter);

        pci_disable_device(pdev);
}

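/* EEH/AER error_detected callback: the PCI channel is in an error
 * state. Detach the netdev, tear down HW resources and tell the core
 * whether a slot reset is worth attempting.
 */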
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
                                pci_channel_state_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_err(&adapter->pdev->dev, "EEH error detected\n");

        adapter->eeh_error = true;

        cancel_delayed_work_sync(&adapter->func_recovery_work);

        rtnl_lock();
        netif_device_detach(netdev);
        rtnl_unlock();

        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_clear(adapter);

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_disable_device(pdev);

        /* The error could cause the FW to trigger a flash debug dump.
         * Resetting the card while the flash dump is in progress can
         * cause it not to recover; wait for the dump to finish. Wait
         * only on the first function, as the dump needs to complete
         * only once per adapter.
         */
        if (pdev->devfn == 0)
                ssleep(30);

        return PCI_ERS_RESULT_NEED_RESET;
}

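/* EEH/AER slot_reset callback: re-enable the freshly reset device,
 * restore its PCI state and wait for FW to become ready before
 * reporting that recovery can proceed.
 */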
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        int status;

        dev_info(&adapter->pdev->dev, "EEH reset\n");
        be_clear_all_error(adapter);

        status = pci_enable_device(pdev);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_set_master(pdev);
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        /* Check if card is ok and fw is ready */
        dev_info(&adapter->pdev->dev,
                 "Waiting for FW to be ready after EEH reset\n");
        status = be_fw_wait_ready(adapter);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_cleanup_aer_uncorrect_error_status(pdev);
        return PCI_ERS_RESULT_RECOVERED;
}

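/* EEH/AER resume callback: reinitialize FW and driver state after a
 * successful slot reset and reattach the interface.
 */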
static void be_eeh_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_info(&adapter->pdev->dev, "EEH resume\n");

        pci_save_state(pdev);

        status = be_cmd_reset_function(adapter);
        if (status)
                goto err;

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto err;

        status = be_setup(adapter);
        if (status)
                goto err;

        if (netif_running(netdev)) {
                status = be_open(netdev);
                if (status)
                        goto err;
        }

        schedule_delayed_work(&adapter->func_recovery_work,
                              msecs_to_jiffies(1000));
        netif_device_attach(netdev);
        return;
err:
        dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

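/* EEH/AER recovery callbacks, invoked by the PCI core in the order
 * error_detected -> slot_reset -> resume.
 */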
static const struct pci_error_handlers be_eeh_handlers = {
        .error_detected = be_eeh_err_detected,
        .slot_reset = be_eeh_reset,
        .resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
        .name = DRV_NAME,
        .id_table = be_dev_ids,
        .probe = be_probe,
        .remove = be_remove,
        .suspend = be_suspend,
        .resume = be_resume,
        .shutdown = be_shutdown,
        .err_handler = &be_eeh_handlers
};

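/* Module init: validate rx_frag_size against the fragment sizes the HW
 * supports (2048/4096/8192), falling back to 2048 for anything else,
 * then register the PCI driver.
 *
 * For illustration only (assuming the module is built under the usual
 * be2net name):
 *
 *   modprobe be2net rx_frag_size=4096
 */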
static int __init be_init_module(void)
{
        if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
            rx_frag_size != 2048) {
                printk(KERN_WARNING DRV_NAME
                        " : Module param rx_frag_size must be 2048/4096/8192."
                        " Using 2048\n");
                rx_frag_size = 2048;
        }

        return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
        pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);