73b0009c99fb7326dd55e66a20e592a377df10d3
[firefly-linux-kernel-4.4.55.git] / drivers / net / ethernet / emulex / benet / be_main.c
1 /*
2  * Copyright (C) 2005 - 2013 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17
18 #include <linux/prefetch.h>
19 #include <linux/module.h>
20 #include "be.h"
21 #include "be_cmds.h"
22 #include <asm/div64.h>
23 #include <linux/aer.h>
24
25 MODULE_VERSION(DRV_VER);
26 MODULE_DEVICE_TABLE(pci, be_dev_ids);
27 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
28 MODULE_AUTHOR("Emulex Corporation");
29 MODULE_LICENSE("GPL");
30
31 static unsigned int num_vfs;
32 module_param(num_vfs, uint, S_IRUGO);
33 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
34
35 static ushort rx_frag_size = 2048;
36 module_param(rx_frag_size, ushort, S_IRUGO);
37 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
39 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
40         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
41         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
42         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
44         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
45         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
46         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
47         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
48         { 0 }
49 };
50 MODULE_DEVICE_TABLE(pci, be_dev_ids);
51 /* UE Status Low CSR */
52 static const char * const ue_status_low_desc[] = {
53         "CEV",
54         "CTX",
55         "DBUF",
56         "ERX",
57         "Host",
58         "MPU",
59         "NDMA",
60         "PTC ",
61         "RDMA ",
62         "RXF ",
63         "RXIPS ",
64         "RXULP0 ",
65         "RXULP1 ",
66         "RXULP2 ",
67         "TIM ",
68         "TPOST ",
69         "TPRE ",
70         "TXIPS ",
71         "TXULP0 ",
72         "TXULP1 ",
73         "UC ",
74         "WDMA ",
75         "TXULP2 ",
76         "HOST1 ",
77         "P0_OB_LINK ",
78         "P1_OB_LINK ",
79         "HOST_GPIO ",
80         "MBOX ",
81         "AXGMAC0",
82         "AXGMAC1",
83         "JTAG",
84         "MPU_INTPEND"
85 };
86 /* UE Status High CSR */
87 static const char * const ue_status_hi_desc[] = {
88         "LPCMEMHOST",
89         "MGMT_MAC",
90         "PCS0ONLINE",
91         "MPU_IRAM",
92         "PCS1ONLINE",
93         "PCTL0",
94         "PCTL1",
95         "PMEM",
96         "RR",
97         "TXPB",
98         "RXPP",
99         "XAUI",
100         "TXP",
101         "ARM",
102         "IPC",
103         "HOST2",
104         "HOST3",
105         "HOST4",
106         "HOST5",
107         "HOST6",
108         "HOST7",
109         "HOST8",
110         "HOST9",
111         "NETC",
112         "Unknown",
113         "Unknown",
114         "Unknown",
115         "Unknown",
116         "Unknown",
117         "Unknown",
118         "Unknown",
119         "Unknown"
120 };
121
122 /* Is BE in a multi-channel mode */
123 static inline bool be_is_mc(struct be_adapter *adapter) {
124         return (adapter->function_mode & FLEX10_MODE ||
125                 adapter->function_mode & VNIC_MODE ||
126                 adapter->function_mode & UMC_ENABLED);
127 }
128
129 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
130 {
131         struct be_dma_mem *mem = &q->dma_mem;
132         if (mem->va) {
133                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134                                   mem->dma);
135                 mem->va = NULL;
136         }
137 }
138
139 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140                 u16 len, u16 entry_size)
141 {
142         struct be_dma_mem *mem = &q->dma_mem;
143
144         memset(q, 0, sizeof(*q));
145         q->len = len;
146         q->entry_size = entry_size;
147         mem->size = len * entry_size;
148         mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149                                      GFP_KERNEL | __GFP_ZERO);
150         if (!mem->va)
151                 return -ENOMEM;
152         return 0;
153 }
154
155 static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
156 {
157         u32 reg, enabled;
158
159         pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
160                                 &reg);
161         enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
163         if (!enabled && enable)
164                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
165         else if (enabled && !enable)
166                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
167         else
168                 return;
169
170         pci_write_config_dword(adapter->pdev,
171                         PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
172 }
173
174 static void be_intr_set(struct be_adapter *adapter, bool enable)
175 {
176         int status = 0;
177
178         /* On lancer interrupts can't be controlled via this register */
179         if (lancer_chip(adapter))
180                 return;
181
182         if (adapter->eeh_error)
183                 return;
184
185         status = be_cmd_intr_set(adapter, enable);
186         if (status)
187                 be_reg_intr_set(adapter, enable);
188 }
189
190 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
191 {
192         u32 val = 0;
193         val |= qid & DB_RQ_RING_ID_MASK;
194         val |= posted << DB_RQ_NUM_POSTED_SHIFT;
195
196         wmb();
197         iowrite32(val, adapter->db + DB_RQ_OFFSET);
198 }
199
200 static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
201                           u16 posted)
202 {
203         u32 val = 0;
204         val |= txo->q.id & DB_TXULP_RING_ID_MASK;
205         val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
206
207         wmb();
208         iowrite32(val, adapter->db + txo->db_offset);
209 }
210
211 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
212                 bool arm, bool clear_int, u16 num_popped)
213 {
214         u32 val = 0;
215         val |= qid & DB_EQ_RING_ID_MASK;
216         val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
217                         DB_EQ_RING_ID_EXT_MASK_SHIFT);
218
219         if (adapter->eeh_error)
220                 return;
221
222         if (arm)
223                 val |= 1 << DB_EQ_REARM_SHIFT;
224         if (clear_int)
225                 val |= 1 << DB_EQ_CLR_SHIFT;
226         val |= 1 << DB_EQ_EVNT_SHIFT;
227         val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
228         iowrite32(val, adapter->db + DB_EQ_OFFSET);
229 }
230
231 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
232 {
233         u32 val = 0;
234         val |= qid & DB_CQ_RING_ID_MASK;
235         val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
236                         DB_CQ_RING_ID_EXT_MASK_SHIFT);
237
238         if (adapter->eeh_error)
239                 return;
240
241         if (arm)
242                 val |= 1 << DB_CQ_REARM_SHIFT;
243         val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
244         iowrite32(val, adapter->db + DB_CQ_OFFSET);
245 }
246
247 static int be_mac_addr_set(struct net_device *netdev, void *p)
248 {
249         struct be_adapter *adapter = netdev_priv(netdev);
250         struct device *dev = &adapter->pdev->dev;
251         struct sockaddr *addr = p;
252         int status;
253         u8 mac[ETH_ALEN];
254         u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
255
256         if (!is_valid_ether_addr(addr->sa_data))
257                 return -EADDRNOTAVAIL;
258
259         /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
260          * privilege or if PF did not provision the new MAC address.
261          * On BE3, this cmd will always fail if the VF doesn't have the
262          * FILTMGMT privilege. This failure is OK, only if the PF programmed
263          * the MAC for the VF.
264          */
265         status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
266                                  adapter->if_handle, &adapter->pmac_id[0], 0);
267         if (!status) {
268                 curr_pmac_id = adapter->pmac_id[0];
269
270                 /* Delete the old programmed MAC. This call may fail if the
271                  * old MAC was already deleted by the PF driver.
272                  */
273                 if (adapter->pmac_id[0] != old_pmac_id)
274                         be_cmd_pmac_del(adapter, adapter->if_handle,
275                                         old_pmac_id, 0);
276         }
277
278         /* Decide if the new MAC is successfully activated only after
279          * querying the FW
280          */
281         status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
282         if (status)
283                 goto err;
284
285         /* The MAC change did not happen, either due to lack of privilege
286          * or PF didn't pre-provision.
287          */
288         if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
289                 status = -EPERM;
290                 goto err;
291         }
292
293         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
294         dev_info(dev, "MAC address changed to %pM\n", mac);
295         return 0;
296 err:
297         dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
298         return status;
299 }
300
301 /* BE2 supports only v0 cmd */
302 static void *hw_stats_from_cmd(struct be_adapter *adapter)
303 {
304         if (BE2_chip(adapter)) {
305                 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
306
307                 return &cmd->hw_stats;
308         } else  {
309                 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
310
311                 return &cmd->hw_stats;
312         }
313 }
314
315 /* BE2 supports only v0 cmd */
316 static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
317 {
318         if (BE2_chip(adapter)) {
319                 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
320
321                 return &hw_stats->erx;
322         } else {
323                 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
324
325                 return &hw_stats->erx;
326         }
327 }
328
329 static void populate_be_v0_stats(struct be_adapter *adapter)
330 {
331         struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
332         struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
333         struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
334         struct be_port_rxf_stats_v0 *port_stats =
335                                         &rxf_stats->port[adapter->port_num];
336         struct be_drv_stats *drvs = &adapter->drv_stats;
337
338         be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
339         drvs->rx_pause_frames = port_stats->rx_pause_frames;
340         drvs->rx_crc_errors = port_stats->rx_crc_errors;
341         drvs->rx_control_frames = port_stats->rx_control_frames;
342         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
343         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
344         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
345         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
346         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
347         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
348         drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
349         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
350         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
351         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
352         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
353         drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
354         drvs->rx_dropped_header_too_small =
355                 port_stats->rx_dropped_header_too_small;
356         drvs->rx_address_filtered =
357                                         port_stats->rx_address_filtered +
358                                         port_stats->rx_vlan_filtered;
359         drvs->rx_alignment_symbol_errors =
360                 port_stats->rx_alignment_symbol_errors;
361
362         drvs->tx_pauseframes = port_stats->tx_pauseframes;
363         drvs->tx_controlframes = port_stats->tx_controlframes;
364
365         if (adapter->port_num)
366                 drvs->jabber_events = rxf_stats->port1_jabber_events;
367         else
368                 drvs->jabber_events = rxf_stats->port0_jabber_events;
369         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
370         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
371         drvs->forwarded_packets = rxf_stats->forwarded_packets;
372         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
373         drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
374         drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
375         adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
376 }
377
378 static void populate_be_v1_stats(struct be_adapter *adapter)
379 {
380         struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
381         struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
382         struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
383         struct be_port_rxf_stats_v1 *port_stats =
384                                         &rxf_stats->port[adapter->port_num];
385         struct be_drv_stats *drvs = &adapter->drv_stats;
386
387         be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
388         drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
389         drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
390         drvs->rx_pause_frames = port_stats->rx_pause_frames;
391         drvs->rx_crc_errors = port_stats->rx_crc_errors;
392         drvs->rx_control_frames = port_stats->rx_control_frames;
393         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
394         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
395         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
396         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
397         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
398         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
399         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
400         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
401         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
402         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
403         drvs->rx_dropped_header_too_small =
404                 port_stats->rx_dropped_header_too_small;
405         drvs->rx_input_fifo_overflow_drop =
406                 port_stats->rx_input_fifo_overflow_drop;
407         drvs->rx_address_filtered = port_stats->rx_address_filtered;
408         drvs->rx_alignment_symbol_errors =
409                 port_stats->rx_alignment_symbol_errors;
410         drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
411         drvs->tx_pauseframes = port_stats->tx_pauseframes;
412         drvs->tx_controlframes = port_stats->tx_controlframes;
413         drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
414         drvs->jabber_events = port_stats->jabber_events;
415         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
416         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
417         drvs->forwarded_packets = rxf_stats->forwarded_packets;
418         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
419         drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
420         drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
421         adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
422 }
423
424 static void populate_lancer_stats(struct be_adapter *adapter)
425 {
426
427         struct be_drv_stats *drvs = &adapter->drv_stats;
428         struct lancer_pport_stats *pport_stats =
429                                         pport_stats_from_cmd(adapter);
430
431         be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
432         drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
433         drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
434         drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
435         drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
436         drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
437         drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
438         drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
439         drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
440         drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
441         drvs->rx_dropped_tcp_length =
442                                 pport_stats->rx_dropped_invalid_tcp_length;
443         drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
444         drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
445         drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
446         drvs->rx_dropped_header_too_small =
447                                 pport_stats->rx_dropped_header_too_small;
448         drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
449         drvs->rx_address_filtered =
450                                         pport_stats->rx_address_filtered +
451                                         pport_stats->rx_vlan_filtered;
452         drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
453         drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
454         drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
455         drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
456         drvs->jabber_events = pport_stats->rx_jabbers;
457         drvs->forwarded_packets = pport_stats->num_forwards_lo;
458         drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
459         drvs->rx_drops_too_many_frags =
460                                 pport_stats->rx_drops_too_many_frags_lo;
461 }
462
463 static void accumulate_16bit_val(u32 *acc, u16 val)
464 {
465 #define lo(x)                   (x & 0xFFFF)
466 #define hi(x)                   (x & 0xFFFF0000)
467         bool wrapped = val < lo(*acc);
468         u32 newacc = hi(*acc) + val;
469
470         if (wrapped)
471                 newacc += 65536;
472         ACCESS_ONCE(*acc) = newacc;
473 }
474
475 void populate_erx_stats(struct be_adapter *adapter,
476                         struct be_rx_obj *rxo,
477                         u32 erx_stat)
478 {
479         if (!BEx_chip(adapter))
480                 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
481         else
482                 /* below erx HW counter can actually wrap around after
483                  * 65535. Driver accumulates a 32-bit value
484                  */
485                 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
486                                      (u16)erx_stat);
487 }
488
489 void be_parse_stats(struct be_adapter *adapter)
490 {
491         struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
492         struct be_rx_obj *rxo;
493         int i;
494         u32 erx_stat;
495
496         if (lancer_chip(adapter)) {
497                 populate_lancer_stats(adapter);
498         } else {
499                 if (BE2_chip(adapter))
500                         populate_be_v0_stats(adapter);
501                 else
502                         /* for BE3 and Skyhawk */
503                         populate_be_v1_stats(adapter);
504
505                 /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
506                 for_all_rx_queues(adapter, rxo, i) {
507                         erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
508                         populate_erx_stats(adapter, rxo, erx_stat);
509                 }
510         }
511 }
512
513 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
514                                         struct rtnl_link_stats64 *stats)
515 {
516         struct be_adapter *adapter = netdev_priv(netdev);
517         struct be_drv_stats *drvs = &adapter->drv_stats;
518         struct be_rx_obj *rxo;
519         struct be_tx_obj *txo;
520         u64 pkts, bytes;
521         unsigned int start;
522         int i;
523
524         for_all_rx_queues(adapter, rxo, i) {
525                 const struct be_rx_stats *rx_stats = rx_stats(rxo);
526                 do {
527                         start = u64_stats_fetch_begin_bh(&rx_stats->sync);
528                         pkts = rx_stats(rxo)->rx_pkts;
529                         bytes = rx_stats(rxo)->rx_bytes;
530                 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
531                 stats->rx_packets += pkts;
532                 stats->rx_bytes += bytes;
533                 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
534                 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
535                                         rx_stats(rxo)->rx_drops_no_frags;
536         }
537
538         for_all_tx_queues(adapter, txo, i) {
539                 const struct be_tx_stats *tx_stats = tx_stats(txo);
540                 do {
541                         start = u64_stats_fetch_begin_bh(&tx_stats->sync);
542                         pkts = tx_stats(txo)->tx_pkts;
543                         bytes = tx_stats(txo)->tx_bytes;
544                 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
545                 stats->tx_packets += pkts;
546                 stats->tx_bytes += bytes;
547         }
548
549         /* bad pkts received */
550         stats->rx_errors = drvs->rx_crc_errors +
551                 drvs->rx_alignment_symbol_errors +
552                 drvs->rx_in_range_errors +
553                 drvs->rx_out_range_errors +
554                 drvs->rx_frame_too_long +
555                 drvs->rx_dropped_too_small +
556                 drvs->rx_dropped_too_short +
557                 drvs->rx_dropped_header_too_small +
558                 drvs->rx_dropped_tcp_length +
559                 drvs->rx_dropped_runt;
560
561         /* detailed rx errors */
562         stats->rx_length_errors = drvs->rx_in_range_errors +
563                 drvs->rx_out_range_errors +
564                 drvs->rx_frame_too_long;
565
566         stats->rx_crc_errors = drvs->rx_crc_errors;
567
568         /* frame alignment errors */
569         stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
570
571         /* receiver fifo overrun */
572         /* drops_no_pbuf is no per i/f, it's per BE card */
573         stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
574                                 drvs->rx_input_fifo_overflow_drop +
575                                 drvs->rx_drops_no_pbuf;
576         return stats;
577 }
578
579 void be_link_status_update(struct be_adapter *adapter, u8 link_status)
580 {
581         struct net_device *netdev = adapter->netdev;
582
583         if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
584                 netif_carrier_off(netdev);
585                 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
586         }
587
588         if ((link_status & LINK_STATUS_MASK) == LINK_UP)
589                 netif_carrier_on(netdev);
590         else
591                 netif_carrier_off(netdev);
592 }
593
594 static void be_tx_stats_update(struct be_tx_obj *txo,
595                         u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
596 {
597         struct be_tx_stats *stats = tx_stats(txo);
598
599         u64_stats_update_begin(&stats->sync);
600         stats->tx_reqs++;
601         stats->tx_wrbs += wrb_cnt;
602         stats->tx_bytes += copied;
603         stats->tx_pkts += (gso_segs ? gso_segs : 1);
604         if (stopped)
605                 stats->tx_stops++;
606         u64_stats_update_end(&stats->sync);
607 }
608
609 /* Determine number of WRB entries needed to xmit data in an skb */
610 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
611                                                                 bool *dummy)
612 {
613         int cnt = (skb->len > skb->data_len);
614
615         cnt += skb_shinfo(skb)->nr_frags;
616
617         /* to account for hdr wrb */
618         cnt++;
619         if (lancer_chip(adapter) || !(cnt & 1)) {
620                 *dummy = false;
621         } else {
622                 /* add a dummy to make it an even num */
623                 cnt++;
624                 *dummy = true;
625         }
626         BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
627         return cnt;
628 }
629
630 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
631 {
632         wrb->frag_pa_hi = upper_32_bits(addr);
633         wrb->frag_pa_lo = addr & 0xFFFFFFFF;
634         wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
635         wrb->rsvd0 = 0;
636 }
637
638 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
639                                         struct sk_buff *skb)
640 {
641         u8 vlan_prio;
642         u16 vlan_tag;
643
644         vlan_tag = vlan_tx_tag_get(skb);
645         vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
646         /* If vlan priority provided by OS is NOT in available bmap */
647         if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
648                 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
649                                 adapter->recommended_prio;
650
651         return vlan_tag;
652 }
653
654 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
655                 struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
656 {
657         u16 vlan_tag;
658
659         memset(hdr, 0, sizeof(*hdr));
660
661         AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
662
663         if (skb_is_gso(skb)) {
664                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
665                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
666                         hdr, skb_shinfo(skb)->gso_size);
667                 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
668                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
669         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
670                 if (is_tcp_pkt(skb))
671                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
672                 else if (is_udp_pkt(skb))
673                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
674         }
675
676         if (vlan_tx_tag_present(skb)) {
677                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
678                 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
679                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
680         }
681
682         /* To skip HW VLAN tagging: evt = 1, compl = 0 */
683         AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
684         AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
685         AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
686         AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
687 }
688
689 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
690                 bool unmap_single)
691 {
692         dma_addr_t dma;
693
694         be_dws_le_to_cpu(wrb, sizeof(*wrb));
695
696         dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
697         if (wrb->frag_len) {
698                 if (unmap_single)
699                         dma_unmap_single(dev, dma, wrb->frag_len,
700                                          DMA_TO_DEVICE);
701                 else
702                         dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
703         }
704 }
705
706 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
707                 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
708                 bool skip_hw_vlan)
709 {
710         dma_addr_t busaddr;
711         int i, copied = 0;
712         struct device *dev = &adapter->pdev->dev;
713         struct sk_buff *first_skb = skb;
714         struct be_eth_wrb *wrb;
715         struct be_eth_hdr_wrb *hdr;
716         bool map_single = false;
717         u16 map_head;
718
719         hdr = queue_head_node(txq);
720         queue_head_inc(txq);
721         map_head = txq->head;
722
723         if (skb->len > skb->data_len) {
724                 int len = skb_headlen(skb);
725                 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
726                 if (dma_mapping_error(dev, busaddr))
727                         goto dma_err;
728                 map_single = true;
729                 wrb = queue_head_node(txq);
730                 wrb_fill(wrb, busaddr, len);
731                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
732                 queue_head_inc(txq);
733                 copied += len;
734         }
735
736         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
737                 const struct skb_frag_struct *frag =
738                         &skb_shinfo(skb)->frags[i];
739                 busaddr = skb_frag_dma_map(dev, frag, 0,
740                                            skb_frag_size(frag), DMA_TO_DEVICE);
741                 if (dma_mapping_error(dev, busaddr))
742                         goto dma_err;
743                 wrb = queue_head_node(txq);
744                 wrb_fill(wrb, busaddr, skb_frag_size(frag));
745                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
746                 queue_head_inc(txq);
747                 copied += skb_frag_size(frag);
748         }
749
750         if (dummy_wrb) {
751                 wrb = queue_head_node(txq);
752                 wrb_fill(wrb, 0, 0);
753                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
754                 queue_head_inc(txq);
755         }
756
757         wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
758         be_dws_cpu_to_le(hdr, sizeof(*hdr));
759
760         return copied;
761 dma_err:
762         txq->head = map_head;
763         while (copied) {
764                 wrb = queue_head_node(txq);
765                 unmap_tx_frag(dev, wrb, map_single);
766                 map_single = false;
767                 copied -= wrb->frag_len;
768                 queue_head_inc(txq);
769         }
770         return 0;
771 }
772
773 static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
774                                              struct sk_buff *skb,
775                                              bool *skip_hw_vlan)
776 {
777         u16 vlan_tag = 0;
778
779         skb = skb_share_check(skb, GFP_ATOMIC);
780         if (unlikely(!skb))
781                 return skb;
782
783         if (vlan_tx_tag_present(skb))
784                 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
785
786         if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
787                 if (!vlan_tag)
788                         vlan_tag = adapter->pvid;
789                 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
790                  * skip VLAN insertion
791                  */
792                 if (skip_hw_vlan)
793                         *skip_hw_vlan = true;
794         }
795
796         if (vlan_tag) {
797                 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
798                 if (unlikely(!skb))
799                         return skb;
800                 skb->vlan_tci = 0;
801         }
802
803         /* Insert the outer VLAN, if any */
804         if (adapter->qnq_vid) {
805                 vlan_tag = adapter->qnq_vid;
806                 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
807                 if (unlikely(!skb))
808                         return skb;
809                 if (skip_hw_vlan)
810                         *skip_hw_vlan = true;
811         }
812
813         return skb;
814 }
815
816 static bool be_ipv6_exthdr_check(struct sk_buff *skb)
817 {
818         struct ethhdr *eh = (struct ethhdr *)skb->data;
819         u16 offset = ETH_HLEN;
820
821         if (eh->h_proto == htons(ETH_P_IPV6)) {
822                 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
823
824                 offset += sizeof(struct ipv6hdr);
825                 if (ip6h->nexthdr != NEXTHDR_TCP &&
826                     ip6h->nexthdr != NEXTHDR_UDP) {
827                         struct ipv6_opt_hdr *ehdr =
828                                 (struct ipv6_opt_hdr *) (skb->data + offset);
829
830                         /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
831                         if (ehdr->hdrlen == 0xff)
832                                 return true;
833                 }
834         }
835         return false;
836 }
837
838 static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
839 {
840         return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
841 }
842
843 static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
844                                 struct sk_buff *skb)
845 {
846         return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
847 }
848
849 static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
850                                            struct sk_buff *skb,
851                                            bool *skip_hw_vlan)
852 {
853         struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
854         unsigned int eth_hdr_len;
855         struct iphdr *ip;
856
857         /* Lancer ASIC has a bug wherein packets that are 32 bytes or less
858          * may cause a transmit stall on that port. So the work-around is to
859          * pad such packets to a 36-byte length.
860          */
861         if (unlikely(lancer_chip(adapter) && skb->len <= 32)) {
862                 if (skb_padto(skb, 36))
863                         goto tx_drop;
864                 skb->len = 36;
865         }
866
867         /* For padded packets, BE HW modifies tot_len field in IP header
868          * incorrecly when VLAN tag is inserted by HW.
869          * For padded packets, Lancer computes incorrect checksum.
870          */
871         eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
872                                                 VLAN_ETH_HLEN : ETH_HLEN;
873         if (skb->len <= 60 &&
874             (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
875             is_ipv4_pkt(skb)) {
876                 ip = (struct iphdr *)ip_hdr(skb);
877                 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
878         }
879
880         /* If vlan tag is already inlined in the packet, skip HW VLAN
881          * tagging in UMC mode
882          */
883         if ((adapter->function_mode & UMC_ENABLED) &&
884             veh->h_vlan_proto == htons(ETH_P_8021Q))
885                         *skip_hw_vlan = true;
886
887         /* HW has a bug wherein it will calculate CSUM for VLAN
888          * pkts even though it is disabled.
889          * Manually insert VLAN in pkt.
890          */
891         if (skb->ip_summed != CHECKSUM_PARTIAL &&
892             vlan_tx_tag_present(skb)) {
893                 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
894                 if (unlikely(!skb))
895                         goto tx_drop;
896         }
897
898         /* HW may lockup when VLAN HW tagging is requested on
899          * certain ipv6 packets. Drop such pkts if the HW workaround to
900          * skip HW tagging is not enabled by FW.
901          */
902         if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
903             (adapter->pvid || adapter->qnq_vid) &&
904             !qnq_async_evt_rcvd(adapter)))
905                 goto tx_drop;
906
907         /* Manual VLAN tag insertion to prevent:
908          * ASIC lockup when the ASIC inserts VLAN tag into
909          * certain ipv6 packets. Insert VLAN tags in driver,
910          * and set event, completion, vlan bits accordingly
911          * in the Tx WRB.
912          */
913         if (be_ipv6_tx_stall_chk(adapter, skb) &&
914             be_vlan_tag_tx_chk(adapter, skb)) {
915                 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
916                 if (unlikely(!skb))
917                         goto tx_drop;
918         }
919
920         return skb;
921 tx_drop:
922         dev_kfree_skb_any(skb);
923         return NULL;
924 }
925
926 static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
927 {
928         struct be_adapter *adapter = netdev_priv(netdev);
929         struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
930         struct be_queue_info *txq = &txo->q;
931         bool dummy_wrb, stopped = false;
932         u32 wrb_cnt = 0, copied = 0;
933         bool skip_hw_vlan = false;
934         u32 start = txq->head;
935
936         skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
937         if (!skb)
938                 return NETDEV_TX_OK;
939
940         wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
941
942         copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
943                               skip_hw_vlan);
944         if (copied) {
945                 int gso_segs = skb_shinfo(skb)->gso_segs;
946
947                 /* record the sent skb in the sent_skb table */
948                 BUG_ON(txo->sent_skb_list[start]);
949                 txo->sent_skb_list[start] = skb;
950
951                 /* Ensure txq has space for the next skb; Else stop the queue
952                  * *BEFORE* ringing the tx doorbell, so that we serialze the
953                  * tx compls of the current transmit which'll wake up the queue
954                  */
955                 atomic_add(wrb_cnt, &txq->used);
956                 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
957                                                                 txq->len) {
958                         netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
959                         stopped = true;
960                 }
961
962                 be_txq_notify(adapter, txo, wrb_cnt);
963
964                 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
965         } else {
966                 txq->head = start;
967                 dev_kfree_skb_any(skb);
968         }
969         return NETDEV_TX_OK;
970 }
971
972 static int be_change_mtu(struct net_device *netdev, int new_mtu)
973 {
974         struct be_adapter *adapter = netdev_priv(netdev);
975         if (new_mtu < BE_MIN_MTU ||
976                         new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
977                                         (ETH_HLEN + ETH_FCS_LEN))) {
978                 dev_info(&adapter->pdev->dev,
979                         "MTU must be between %d and %d bytes\n",
980                         BE_MIN_MTU,
981                         (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
982                 return -EINVAL;
983         }
984         dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
985                         netdev->mtu, new_mtu);
986         netdev->mtu = new_mtu;
987         return 0;
988 }
989
990 /*
991  * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
992  * If the user configures more, place BE in vlan promiscuous mode.
993  */
994 static int be_vid_config(struct be_adapter *adapter)
995 {
996         u16 vids[BE_NUM_VLANS_SUPPORTED];
997         u16 num = 0, i;
998         int status = 0;
999
1000         /* No need to further configure vids if in promiscuous mode */
1001         if (adapter->promiscuous)
1002                 return 0;
1003
1004         if (adapter->vlans_added > adapter->max_vlans)
1005                 goto set_vlan_promisc;
1006
1007         /* Construct VLAN Table to give to HW */
1008         for (i = 0; i < VLAN_N_VID; i++)
1009                 if (adapter->vlan_tag[i])
1010                         vids[num++] = cpu_to_le16(i);
1011
1012         status = be_cmd_vlan_config(adapter, adapter->if_handle,
1013                                     vids, num, 1, 0);
1014
1015         /* Set to VLAN promisc mode as setting VLAN filter failed */
1016         if (status) {
1017                 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
1018                 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
1019                 goto set_vlan_promisc;
1020         }
1021
1022         return status;
1023
1024 set_vlan_promisc:
1025         status = be_cmd_vlan_config(adapter, adapter->if_handle,
1026                                     NULL, 0, 1, 1);
1027         return status;
1028 }
1029
1030 static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1031 {
1032         struct be_adapter *adapter = netdev_priv(netdev);
1033         int status = 0;
1034
1035         if (!lancer_chip(adapter) && !be_physfn(adapter)) {
1036                 status = -EINVAL;
1037                 goto ret;
1038         }
1039
1040         /* Packets with VID 0 are always received by Lancer by default */
1041         if (lancer_chip(adapter) && vid == 0)
1042                 goto ret;
1043
1044         adapter->vlan_tag[vid] = 1;
1045         if (adapter->vlans_added <= (adapter->max_vlans + 1))
1046                 status = be_vid_config(adapter);
1047
1048         if (!status)
1049                 adapter->vlans_added++;
1050         else
1051                 adapter->vlan_tag[vid] = 0;
1052 ret:
1053         return status;
1054 }
1055
1056 static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
1057 {
1058         struct be_adapter *adapter = netdev_priv(netdev);
1059         int status = 0;
1060
1061         if (!lancer_chip(adapter) && !be_physfn(adapter)) {
1062                 status = -EINVAL;
1063                 goto ret;
1064         }
1065
1066         /* Packets with VID 0 are always received by Lancer by default */
1067         if (lancer_chip(adapter) && vid == 0)
1068                 goto ret;
1069
1070         adapter->vlan_tag[vid] = 0;
1071         if (adapter->vlans_added <= adapter->max_vlans)
1072                 status = be_vid_config(adapter);
1073
1074         if (!status)
1075                 adapter->vlans_added--;
1076         else
1077                 adapter->vlan_tag[vid] = 1;
1078 ret:
1079         return status;
1080 }
1081
1082 static void be_set_rx_mode(struct net_device *netdev)
1083 {
1084         struct be_adapter *adapter = netdev_priv(netdev);
1085         int status;
1086
1087         if (netdev->flags & IFF_PROMISC) {
1088                 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1089                 adapter->promiscuous = true;
1090                 goto done;
1091         }
1092
1093         /* BE was previously in promiscuous mode; disable it */
1094         if (adapter->promiscuous) {
1095                 adapter->promiscuous = false;
1096                 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1097
1098                 if (adapter->vlans_added)
1099                         be_vid_config(adapter);
1100         }
1101
1102         /* Enable multicast promisc if num configured exceeds what we support */
1103         if (netdev->flags & IFF_ALLMULTI ||
1104             netdev_mc_count(netdev) > adapter->max_mcast_mac) {
1105                 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1106                 goto done;
1107         }
1108
1109         if (netdev_uc_count(netdev) != adapter->uc_macs) {
1110                 struct netdev_hw_addr *ha;
1111                 int i = 1; /* First slot is claimed by the Primary MAC */
1112
1113                 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
1114                         be_cmd_pmac_del(adapter, adapter->if_handle,
1115                                         adapter->pmac_id[i], 0);
1116                 }
1117
1118                 if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
1119                         be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1120                         adapter->promiscuous = true;
1121                         goto done;
1122                 }
1123
1124                 netdev_for_each_uc_addr(ha, adapter->netdev) {
1125                         adapter->uc_macs++; /* First slot is for Primary MAC */
1126                         be_cmd_pmac_add(adapter, (u8 *)ha->addr,
1127                                         adapter->if_handle,
1128                                         &adapter->pmac_id[adapter->uc_macs], 0);
1129                 }
1130         }
1131
1132         status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
1133
1134         /* Set to MCAST promisc mode if setting MULTICAST address fails */
1135         if (status) {
1136                 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
1137                 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
1138                 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1139         }
1140 done:
1141         return;
1142 }
1143
1144 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1145 {
1146         struct be_adapter *adapter = netdev_priv(netdev);
1147         struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1148         int status;
1149         bool active_mac = false;
1150         u32 pmac_id;
1151         u8 old_mac[ETH_ALEN];
1152
1153         if (!sriov_enabled(adapter))
1154                 return -EPERM;
1155
1156         if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
1157                 return -EINVAL;
1158
1159         if (lancer_chip(adapter)) {
1160                 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
1161                                                   &pmac_id, vf + 1);
1162                 if (!status && active_mac)
1163                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
1164                                         pmac_id, vf + 1);
1165
1166                 status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
1167         } else {
1168                 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
1169                                          vf_cfg->pmac_id, vf + 1);
1170
1171                 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1172                                          &vf_cfg->pmac_id, vf + 1);
1173         }
1174
1175         if (status)
1176                 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1177                                 mac, vf);
1178         else
1179                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
1180
1181         return status;
1182 }
1183
1184 static int be_get_vf_config(struct net_device *netdev, int vf,
1185                         struct ifla_vf_info *vi)
1186 {
1187         struct be_adapter *adapter = netdev_priv(netdev);
1188         struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1189
1190         if (!sriov_enabled(adapter))
1191                 return -EPERM;
1192
1193         if (vf >= adapter->num_vfs)
1194                 return -EINVAL;
1195
1196         vi->vf = vf;
1197         vi->tx_rate = vf_cfg->tx_rate;
1198         vi->vlan = vf_cfg->vlan_tag;
1199         vi->qos = 0;
1200         memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1201
1202         return 0;
1203 }
1204
1205 static int be_set_vf_vlan(struct net_device *netdev,
1206                         int vf, u16 vlan, u8 qos)
1207 {
1208         struct be_adapter *adapter = netdev_priv(netdev);
1209         int status = 0;
1210
1211         if (!sriov_enabled(adapter))
1212                 return -EPERM;
1213
1214         if (vf >= adapter->num_vfs || vlan > 4095)
1215                 return -EINVAL;
1216
1217         if (vlan) {
1218                 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1219                         /* If this is new value, program it. Else skip. */
1220                         adapter->vf_cfg[vf].vlan_tag = vlan;
1221
1222                         status = be_cmd_set_hsw_config(adapter, vlan,
1223                                 vf + 1, adapter->vf_cfg[vf].if_handle);
1224                 }
1225         } else {
1226                 /* Reset Transparent Vlan Tagging. */
1227                 adapter->vf_cfg[vf].vlan_tag = 0;
1228                 vlan = adapter->vf_cfg[vf].def_vid;
1229                 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1230                         adapter->vf_cfg[vf].if_handle);
1231         }
1232
1233
1234         if (status)
1235                 dev_info(&adapter->pdev->dev,
1236                                 "VLAN %d config on VF %d failed\n", vlan, vf);
1237         return status;
1238 }
1239
1240 static int be_set_vf_tx_rate(struct net_device *netdev,
1241                         int vf, int rate)
1242 {
1243         struct be_adapter *adapter = netdev_priv(netdev);
1244         int status = 0;
1245
1246         if (!sriov_enabled(adapter))
1247                 return -EPERM;
1248
1249         if (vf >= adapter->num_vfs)
1250                 return -EINVAL;
1251
1252         if (rate < 100 || rate > 10000) {
1253                 dev_err(&adapter->pdev->dev,
1254                         "tx rate must be between 100 and 10000 Mbps\n");
1255                 return -EINVAL;
1256         }
1257
1258         if (lancer_chip(adapter))
1259                 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1260         else
1261                 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
1262
1263         if (status)
1264                 dev_err(&adapter->pdev->dev,
1265                                 "tx rate %d on VF %d failed\n", rate, vf);
1266         else
1267                 adapter->vf_cfg[vf].tx_rate = rate;
1268         return status;
1269 }
1270
1271 static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
1272 {
1273         struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1274         ulong now = jiffies;
1275         ulong delta = now - stats->rx_jiffies;
1276         u64 pkts;
1277         unsigned int start, eqd;
1278
1279         if (!eqo->enable_aic) {
1280                 eqd = eqo->eqd;
1281                 goto modify_eqd;
1282         }
1283
1284         if (eqo->idx >= adapter->num_rx_qs)
1285                 return;
1286
1287         stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1288
1289         /* Wrapped around */
1290         if (time_before(now, stats->rx_jiffies)) {
1291                 stats->rx_jiffies = now;
1292                 return;
1293         }
1294
1295         /* Update once a second */
1296         if (delta < HZ)
1297                 return;
1298
1299         do {
1300                 start = u64_stats_fetch_begin_bh(&stats->sync);
1301                 pkts = stats->rx_pkts;
1302         } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1303
1304         stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
1305         stats->rx_pkts_prev = pkts;
1306         stats->rx_jiffies = now;
1307         eqd = (stats->rx_pps / 110000) << 3;
1308         eqd = min(eqd, eqo->max_eqd);
1309         eqd = max(eqd, eqo->min_eqd);
1310         if (eqd < 10)
1311                 eqd = 0;
1312
1313 modify_eqd:
1314         if (eqd != eqo->cur_eqd) {
1315                 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1316                 eqo->cur_eqd = eqd;
1317         }
1318 }
1319
1320 static void be_rx_stats_update(struct be_rx_obj *rxo,
1321                 struct be_rx_compl_info *rxcp)
1322 {
1323         struct be_rx_stats *stats = rx_stats(rxo);
1324
1325         u64_stats_update_begin(&stats->sync);
1326         stats->rx_compl++;
1327         stats->rx_bytes += rxcp->pkt_size;
1328         stats->rx_pkts++;
1329         if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1330                 stats->rx_mcast_pkts++;
1331         if (rxcp->err)
1332                 stats->rx_compl_err++;
1333         u64_stats_update_end(&stats->sync);
1334 }
1335
1336 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1337 {
1338         /* L4 checksum is not reliable for non TCP/UDP packets.
1339          * Also ignore ipcksm for ipv6 pkts */
1340         return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1341                                 (rxcp->ip_csum || rxcp->ipv6);
1342 }
1343
1344 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1345                                                 u16 frag_idx)
1346 {
1347         struct be_adapter *adapter = rxo->adapter;
1348         struct be_rx_page_info *rx_page_info;
1349         struct be_queue_info *rxq = &rxo->q;
1350
1351         rx_page_info = &rxo->page_info_tbl[frag_idx];
1352         BUG_ON(!rx_page_info->page);
1353
1354         if (rx_page_info->last_page_user) {
1355                 dma_unmap_page(&adapter->pdev->dev,
1356                                dma_unmap_addr(rx_page_info, bus),
1357                                adapter->big_page_size, DMA_FROM_DEVICE);
1358                 rx_page_info->last_page_user = false;
1359         }
1360
1361         atomic_dec(&rxq->used);
1362         return rx_page_info;
1363 }
1364
1365 /* Throwaway the data in the Rx completion */
1366 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1367                                 struct be_rx_compl_info *rxcp)
1368 {
1369         struct be_queue_info *rxq = &rxo->q;
1370         struct be_rx_page_info *page_info;
1371         u16 i, num_rcvd = rxcp->num_rcvd;
1372
1373         for (i = 0; i < num_rcvd; i++) {
1374                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1375                 put_page(page_info->page);
1376                 memset(page_info, 0, sizeof(*page_info));
1377                 index_inc(&rxcp->rxq_idx, rxq->len);
1378         }
1379 }
1380
1381 /*
1382  * skb_fill_rx_data forms a complete skb for an ether frame
1383  * indicated by rxcp.
1384  */
1385 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1386                              struct be_rx_compl_info *rxcp)
1387 {
1388         struct be_queue_info *rxq = &rxo->q;
1389         struct be_rx_page_info *page_info;
1390         u16 i, j;
1391         u16 hdr_len, curr_frag_len, remaining;
1392         u8 *start;
1393
1394         page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1395         start = page_address(page_info->page) + page_info->page_offset;
1396         prefetch(start);
1397
1398         /* Copy data in the first descriptor of this completion */
1399         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1400
1401         skb->len = curr_frag_len;
1402         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1403                 memcpy(skb->data, start, curr_frag_len);
1404                 /* Complete packet has now been moved to data */
1405                 put_page(page_info->page);
1406                 skb->data_len = 0;
1407                 skb->tail += curr_frag_len;
1408         } else {
1409                 hdr_len = ETH_HLEN;
1410                 memcpy(skb->data, start, hdr_len);
1411                 skb_shinfo(skb)->nr_frags = 1;
1412                 skb_frag_set_page(skb, 0, page_info->page);
1413                 skb_shinfo(skb)->frags[0].page_offset =
1414                                         page_info->page_offset + hdr_len;
1415                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1416                 skb->data_len = curr_frag_len - hdr_len;
1417                 skb->truesize += rx_frag_size;
1418                 skb->tail += hdr_len;
1419         }
1420         page_info->page = NULL;
1421
1422         if (rxcp->pkt_size <= rx_frag_size) {
1423                 BUG_ON(rxcp->num_rcvd != 1);
1424                 return;
1425         }
1426
1427         /* More frags present for this completion */
1428         index_inc(&rxcp->rxq_idx, rxq->len);
1429         remaining = rxcp->pkt_size - curr_frag_len;
1430         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1431                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1432                 curr_frag_len = min(remaining, rx_frag_size);
1433
1434                 /* Coalesce all frags from the same physical page in one slot */
1435                 if (page_info->page_offset == 0) {
1436                         /* Fresh page */
1437                         j++;
1438                         skb_frag_set_page(skb, j, page_info->page);
1439                         skb_shinfo(skb)->frags[j].page_offset =
1440                                                         page_info->page_offset;
1441                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1442                         skb_shinfo(skb)->nr_frags++;
1443                 } else {
1444                         put_page(page_info->page);
1445                 }
1446
1447                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1448                 skb->len += curr_frag_len;
1449                 skb->data_len += curr_frag_len;
1450                 skb->truesize += rx_frag_size;
1451                 remaining -= curr_frag_len;
1452                 index_inc(&rxcp->rxq_idx, rxq->len);
1453                 page_info->page = NULL;
1454         }
1455         BUG_ON(j > MAX_SKB_FRAGS);
1456 }
1457
1458 /* Process the RX completion indicated by rxcp when GRO is disabled */
1459 static void be_rx_compl_process(struct be_rx_obj *rxo,
1460                                 struct be_rx_compl_info *rxcp)
1461 {
1462         struct be_adapter *adapter = rxo->adapter;
1463         struct net_device *netdev = adapter->netdev;
1464         struct sk_buff *skb;
1465
1466         skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1467         if (unlikely(!skb)) {
1468                 rx_stats(rxo)->rx_drops_no_skbs++;
1469                 be_rx_compl_discard(rxo, rxcp);
1470                 return;
1471         }
1472
1473         skb_fill_rx_data(rxo, skb, rxcp);
1474
1475         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1476                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1477         else
1478                 skb_checksum_none_assert(skb);
1479
1480         skb->protocol = eth_type_trans(skb, netdev);
1481         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1482         if (netdev->features & NETIF_F_RXHASH)
1483                 skb->rxhash = rxcp->rss_hash;
1484
1485
1486         if (rxcp->vlanf)
1487                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1488
1489         netif_receive_skb(skb);
1490 }
1491
1492 /* Process the RX completion indicated by rxcp when GRO is enabled */
1493 void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1494                              struct be_rx_compl_info *rxcp)
1495 {
1496         struct be_adapter *adapter = rxo->adapter;
1497         struct be_rx_page_info *page_info;
1498         struct sk_buff *skb = NULL;
1499         struct be_queue_info *rxq = &rxo->q;
1500         u16 remaining, curr_frag_len;
1501         u16 i, j;
1502
1503         skb = napi_get_frags(napi);
1504         if (!skb) {
1505                 be_rx_compl_discard(rxo, rxcp);
1506                 return;
1507         }
1508
1509         remaining = rxcp->pkt_size;
1510         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1511                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1512
1513                 curr_frag_len = min(remaining, rx_frag_size);
1514
1515                 /* Coalesce all frags from the same physical page in one slot */
1516                 if (i == 0 || page_info->page_offset == 0) {
1517                         /* First frag or Fresh page */
1518                         j++;
1519                         skb_frag_set_page(skb, j, page_info->page);
1520                         skb_shinfo(skb)->frags[j].page_offset =
1521                                                         page_info->page_offset;
1522                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1523                 } else {
1524                         put_page(page_info->page);
1525                 }
1526                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1527                 skb->truesize += rx_frag_size;
1528                 remaining -= curr_frag_len;
1529                 index_inc(&rxcp->rxq_idx, rxq->len);
1530                 memset(page_info, 0, sizeof(*page_info));
1531         }
1532         BUG_ON(j > MAX_SKB_FRAGS);
1533
1534         skb_shinfo(skb)->nr_frags = j + 1;
1535         skb->len = rxcp->pkt_size;
1536         skb->data_len = rxcp->pkt_size;
1537         skb->ip_summed = CHECKSUM_UNNECESSARY;
1538         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1539         if (adapter->netdev->features & NETIF_F_RXHASH)
1540                 skb->rxhash = rxcp->rss_hash;
1541
1542         if (rxcp->vlanf)
1543                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1544
1545         napi_gro_frags(napi);
1546 }
1547
1548 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1549                                  struct be_rx_compl_info *rxcp)
1550 {
1551         rxcp->pkt_size =
1552                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1553         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1554         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1555         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1556         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1557         rxcp->ip_csum =
1558                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1559         rxcp->l4_csum =
1560                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1561         rxcp->ipv6 =
1562                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1563         rxcp->rxq_idx =
1564                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1565         rxcp->num_rcvd =
1566                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1567         rxcp->pkt_type =
1568                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1569         rxcp->rss_hash =
1570                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1571         if (rxcp->vlanf) {
1572                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1573                                           compl);
1574                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1575                                                compl);
1576         }
1577         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1578 }
1579
1580 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1581                                  struct be_rx_compl_info *rxcp)
1582 {
1583         rxcp->pkt_size =
1584                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1585         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1586         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1587         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1588         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1589         rxcp->ip_csum =
1590                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1591         rxcp->l4_csum =
1592                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1593         rxcp->ipv6 =
1594                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1595         rxcp->rxq_idx =
1596                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1597         rxcp->num_rcvd =
1598                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1599         rxcp->pkt_type =
1600                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1601         rxcp->rss_hash =
1602                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1603         if (rxcp->vlanf) {
1604                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1605                                           compl);
1606                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1607                                                compl);
1608         }
1609         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1610         rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1611                                       ip_frag, compl);
1612 }
1613
1614 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1615 {
1616         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1617         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1618         struct be_adapter *adapter = rxo->adapter;
1619
1620         /* For checking the valid bit it is Ok to use either definition as the
1621          * valid bit is at the same position in both v0 and v1 Rx compl */
1622         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1623                 return NULL;
1624
1625         rmb();
1626         be_dws_le_to_cpu(compl, sizeof(*compl));
1627
1628         if (adapter->be3_native)
1629                 be_parse_rx_compl_v1(compl, rxcp);
1630         else
1631                 be_parse_rx_compl_v0(compl, rxcp);
1632
1633         if (rxcp->ip_frag)
1634                 rxcp->l4_csum = 0;
1635
1636         if (rxcp->vlanf) {
1637                 /* vlanf could be wrongly set in some cards.
1638                  * ignore if vtm is not set */
1639                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1640                         rxcp->vlanf = 0;
1641
1642                 if (!lancer_chip(adapter))
1643                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1644
1645                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1646                     !adapter->vlan_tag[rxcp->vlan_tag])
1647                         rxcp->vlanf = 0;
1648         }
1649
1650         /* As the compl has been parsed, reset it; we wont touch it again */
1651         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1652
1653         queue_tail_inc(&rxo->cq);
1654         return rxcp;
1655 }
1656
1657 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1658 {
1659         u32 order = get_order(size);
1660
1661         if (order > 0)
1662                 gfp |= __GFP_COMP;
1663         return  alloc_pages(gfp, order);
1664 }
1665
1666 /*
1667  * Allocate a page, split it to fragments of size rx_frag_size and post as
1668  * receive buffers to BE
1669  */
1670 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1671 {
1672         struct be_adapter *adapter = rxo->adapter;
1673         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1674         struct be_queue_info *rxq = &rxo->q;
1675         struct page *pagep = NULL;
1676         struct be_eth_rx_d *rxd;
1677         u64 page_dmaaddr = 0, frag_dmaaddr;
1678         u32 posted, page_offset = 0;
1679
1680         page_info = &rxo->page_info_tbl[rxq->head];
1681         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1682                 if (!pagep) {
1683                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1684                         if (unlikely(!pagep)) {
1685                                 rx_stats(rxo)->rx_post_fail++;
1686                                 break;
1687                         }
1688                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1689                                                     0, adapter->big_page_size,
1690                                                     DMA_FROM_DEVICE);
1691                         page_info->page_offset = 0;
1692                 } else {
1693                         get_page(pagep);
1694                         page_info->page_offset = page_offset + rx_frag_size;
1695                 }
1696                 page_offset = page_info->page_offset;
1697                 page_info->page = pagep;
1698                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1699                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1700
1701                 rxd = queue_head_node(rxq);
1702                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1703                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1704
1705                 /* Any space left in the current big page for another frag? */
1706                 if ((page_offset + rx_frag_size + rx_frag_size) >
1707                                         adapter->big_page_size) {
1708                         pagep = NULL;
1709                         page_info->last_page_user = true;
1710                 }
1711
1712                 prev_page_info = page_info;
1713                 queue_head_inc(rxq);
1714                 page_info = &rxo->page_info_tbl[rxq->head];
1715         }
1716         if (pagep)
1717                 prev_page_info->last_page_user = true;
1718
1719         if (posted) {
1720                 atomic_add(posted, &rxq->used);
1721                 be_rxq_notify(adapter, rxq->id, posted);
1722         } else if (atomic_read(&rxq->used) == 0) {
1723                 /* Let be_worker replenish when memory is available */
1724                 rxo->rx_post_starved = true;
1725         }
1726 }
1727
1728 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1729 {
1730         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1731
1732         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1733                 return NULL;
1734
1735         rmb();
1736         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1737
1738         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1739
1740         queue_tail_inc(tx_cq);
1741         return txcp;
1742 }
1743
1744 static u16 be_tx_compl_process(struct be_adapter *adapter,
1745                 struct be_tx_obj *txo, u16 last_index)
1746 {
1747         struct be_queue_info *txq = &txo->q;
1748         struct be_eth_wrb *wrb;
1749         struct sk_buff **sent_skbs = txo->sent_skb_list;
1750         struct sk_buff *sent_skb;
1751         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1752         bool unmap_skb_hdr = true;
1753
1754         sent_skb = sent_skbs[txq->tail];
1755         BUG_ON(!sent_skb);
1756         sent_skbs[txq->tail] = NULL;
1757
1758         /* skip header wrb */
1759         queue_tail_inc(txq);
1760
1761         do {
1762                 cur_index = txq->tail;
1763                 wrb = queue_tail_node(txq);
1764                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1765                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1766                 unmap_skb_hdr = false;
1767
1768                 num_wrbs++;
1769                 queue_tail_inc(txq);
1770         } while (cur_index != last_index);
1771
1772         kfree_skb(sent_skb);
1773         return num_wrbs;
1774 }
1775
1776 /* Return the number of events in the event queue */
1777 static inline int events_get(struct be_eq_obj *eqo)
1778 {
1779         struct be_eq_entry *eqe;
1780         int num = 0;
1781
1782         do {
1783                 eqe = queue_tail_node(&eqo->q);
1784                 if (eqe->evt == 0)
1785                         break;
1786
1787                 rmb();
1788                 eqe->evt = 0;
1789                 num++;
1790                 queue_tail_inc(&eqo->q);
1791         } while (true);
1792
1793         return num;
1794 }
1795
1796 /* Leaves the EQ is disarmed state */
1797 static void be_eq_clean(struct be_eq_obj *eqo)
1798 {
1799         int num = events_get(eqo);
1800
1801         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1802 }
1803
1804 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1805 {
1806         struct be_rx_page_info *page_info;
1807         struct be_queue_info *rxq = &rxo->q;
1808         struct be_queue_info *rx_cq = &rxo->cq;
1809         struct be_rx_compl_info *rxcp;
1810         struct be_adapter *adapter = rxo->adapter;
1811         int flush_wait = 0;
1812         u16 tail;
1813
1814         /* Consume pending rx completions.
1815          * Wait for the flush completion (identified by zero num_rcvd)
1816          * to arrive. Notify CQ even when there are no more CQ entries
1817          * for HW to flush partially coalesced CQ entries.
1818          * In Lancer, there is no need to wait for flush compl.
1819          */
1820         for (;;) {
1821                 rxcp = be_rx_compl_get(rxo);
1822                 if (rxcp == NULL) {
1823                         if (lancer_chip(adapter))
1824                                 break;
1825
1826                         if (flush_wait++ > 10 || be_hw_error(adapter)) {
1827                                 dev_warn(&adapter->pdev->dev,
1828                                          "did not receive flush compl\n");
1829                                 break;
1830                         }
1831                         be_cq_notify(adapter, rx_cq->id, true, 0);
1832                         mdelay(1);
1833                 } else {
1834                         be_rx_compl_discard(rxo, rxcp);
1835                         be_cq_notify(adapter, rx_cq->id, false, 1);
1836                         if (rxcp->num_rcvd == 0)
1837                                 break;
1838                 }
1839         }
1840
1841         /* After cleanup, leave the CQ in unarmed state */
1842         be_cq_notify(adapter, rx_cq->id, false, 0);
1843
1844         /* Then free posted rx buffers that were not used */
1845         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1846         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1847                 page_info = get_rx_page_info(rxo, tail);
1848                 put_page(page_info->page);
1849                 memset(page_info, 0, sizeof(*page_info));
1850         }
1851         BUG_ON(atomic_read(&rxq->used));
1852         rxq->tail = rxq->head = 0;
1853 }
1854
1855 static void be_tx_compl_clean(struct be_adapter *adapter)
1856 {
1857         struct be_tx_obj *txo;
1858         struct be_queue_info *txq;
1859         struct be_eth_tx_compl *txcp;
1860         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1861         struct sk_buff *sent_skb;
1862         bool dummy_wrb;
1863         int i, pending_txqs;
1864
1865         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1866         do {
1867                 pending_txqs = adapter->num_tx_qs;
1868
1869                 for_all_tx_queues(adapter, txo, i) {
1870                         txq = &txo->q;
1871                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1872                                 end_idx =
1873                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1874                                                       wrb_index, txcp);
1875                                 num_wrbs += be_tx_compl_process(adapter, txo,
1876                                                                 end_idx);
1877                                 cmpl++;
1878                         }
1879                         if (cmpl) {
1880                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1881                                 atomic_sub(num_wrbs, &txq->used);
1882                                 cmpl = 0;
1883                                 num_wrbs = 0;
1884                         }
1885                         if (atomic_read(&txq->used) == 0)
1886                                 pending_txqs--;
1887                 }
1888
1889                 if (pending_txqs == 0 || ++timeo > 200)
1890                         break;
1891
1892                 mdelay(1);
1893         } while (true);
1894
1895         for_all_tx_queues(adapter, txo, i) {
1896                 txq = &txo->q;
1897                 if (atomic_read(&txq->used))
1898                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1899                                 atomic_read(&txq->used));
1900
1901                 /* free posted tx for which compls will never arrive */
1902                 while (atomic_read(&txq->used)) {
1903                         sent_skb = txo->sent_skb_list[txq->tail];
1904                         end_idx = txq->tail;
1905                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1906                                                    &dummy_wrb);
1907                         index_adv(&end_idx, num_wrbs - 1, txq->len);
1908                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1909                         atomic_sub(num_wrbs, &txq->used);
1910                 }
1911         }
1912 }
1913
1914 static void be_evt_queues_destroy(struct be_adapter *adapter)
1915 {
1916         struct be_eq_obj *eqo;
1917         int i;
1918
1919         for_all_evt_queues(adapter, eqo, i) {
1920                 if (eqo->q.created) {
1921                         be_eq_clean(eqo);
1922                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1923                 }
1924                 be_queue_free(adapter, &eqo->q);
1925         }
1926 }
1927
1928 static int be_evt_queues_create(struct be_adapter *adapter)
1929 {
1930         struct be_queue_info *eq;
1931         struct be_eq_obj *eqo;
1932         int i, rc;
1933
1934         adapter->num_evt_qs = num_irqs(adapter);
1935
1936         for_all_evt_queues(adapter, eqo, i) {
1937                 eqo->adapter = adapter;
1938                 eqo->tx_budget = BE_TX_BUDGET;
1939                 eqo->idx = i;
1940                 eqo->max_eqd = BE_MAX_EQD;
1941                 eqo->enable_aic = true;
1942
1943                 eq = &eqo->q;
1944                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1945                                         sizeof(struct be_eq_entry));
1946                 if (rc)
1947                         return rc;
1948
1949                 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1950                 if (rc)
1951                         return rc;
1952         }
1953         return 0;
1954 }
1955
1956 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1957 {
1958         struct be_queue_info *q;
1959
1960         q = &adapter->mcc_obj.q;
1961         if (q->created)
1962                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1963         be_queue_free(adapter, q);
1964
1965         q = &adapter->mcc_obj.cq;
1966         if (q->created)
1967                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1968         be_queue_free(adapter, q);
1969 }
1970
1971 /* Must be called only after TX qs are created as MCC shares TX EQ */
1972 static int be_mcc_queues_create(struct be_adapter *adapter)
1973 {
1974         struct be_queue_info *q, *cq;
1975
1976         cq = &adapter->mcc_obj.cq;
1977         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1978                         sizeof(struct be_mcc_compl)))
1979                 goto err;
1980
1981         /* Use the default EQ for MCC completions */
1982         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1983                 goto mcc_cq_free;
1984
1985         q = &adapter->mcc_obj.q;
1986         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1987                 goto mcc_cq_destroy;
1988
1989         if (be_cmd_mccq_create(adapter, q, cq))
1990                 goto mcc_q_free;
1991
1992         return 0;
1993
1994 mcc_q_free:
1995         be_queue_free(adapter, q);
1996 mcc_cq_destroy:
1997         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1998 mcc_cq_free:
1999         be_queue_free(adapter, cq);
2000 err:
2001         return -1;
2002 }
2003
2004 static void be_tx_queues_destroy(struct be_adapter *adapter)
2005 {
2006         struct be_queue_info *q;
2007         struct be_tx_obj *txo;
2008         u8 i;
2009
2010         for_all_tx_queues(adapter, txo, i) {
2011                 q = &txo->q;
2012                 if (q->created)
2013                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2014                 be_queue_free(adapter, q);
2015
2016                 q = &txo->cq;
2017                 if (q->created)
2018                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2019                 be_queue_free(adapter, q);
2020         }
2021 }
2022
2023 static int be_num_txqs_want(struct be_adapter *adapter)
2024 {
2025         if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
2026             be_is_mc(adapter) ||
2027             (!lancer_chip(adapter) && !be_physfn(adapter)) ||
2028             BE2_chip(adapter))
2029                 return 1;
2030         else
2031                 return adapter->max_tx_queues;
2032 }
2033
2034 static int be_tx_cqs_create(struct be_adapter *adapter)
2035 {
2036         struct be_queue_info *cq, *eq;
2037         int status;
2038         struct be_tx_obj *txo;
2039         u8 i;
2040
2041         adapter->num_tx_qs = be_num_txqs_want(adapter);
2042         if (adapter->num_tx_qs != MAX_TX_QS) {
2043                 rtnl_lock();
2044                 netif_set_real_num_tx_queues(adapter->netdev,
2045                         adapter->num_tx_qs);
2046                 rtnl_unlock();
2047         }
2048
2049         for_all_tx_queues(adapter, txo, i) {
2050                 cq = &txo->cq;
2051                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2052                                         sizeof(struct be_eth_tx_compl));
2053                 if (status)
2054                         return status;
2055
2056                 /* If num_evt_qs is less than num_tx_qs, then more than
2057                  * one txq share an eq
2058                  */
2059                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2060                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2061                 if (status)
2062                         return status;
2063         }
2064         return 0;
2065 }
2066
2067 static int be_tx_qs_create(struct be_adapter *adapter)
2068 {
2069         struct be_tx_obj *txo;
2070         int i, status;
2071
2072         for_all_tx_queues(adapter, txo, i) {
2073                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2074                                         sizeof(struct be_eth_wrb));
2075                 if (status)
2076                         return status;
2077
2078                 status = be_cmd_txq_create(adapter, txo);
2079                 if (status)
2080                         return status;
2081         }
2082
2083         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2084                  adapter->num_tx_qs);
2085         return 0;
2086 }
2087
2088 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2089 {
2090         struct be_queue_info *q;
2091         struct be_rx_obj *rxo;
2092         int i;
2093
2094         for_all_rx_queues(adapter, rxo, i) {
2095                 q = &rxo->cq;
2096                 if (q->created)
2097                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2098                 be_queue_free(adapter, q);
2099         }
2100 }
2101
2102 static int be_rx_cqs_create(struct be_adapter *adapter)
2103 {
2104         struct be_queue_info *eq, *cq;
2105         struct be_rx_obj *rxo;
2106         int rc, i;
2107
2108         /* We'll create as many RSS rings as there are irqs.
2109          * But when there's only one irq there's no use creating RSS rings
2110          */
2111         adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
2112                                 num_irqs(adapter) + 1 : 1;
2113         if (adapter->num_rx_qs != MAX_RX_QS) {
2114                 rtnl_lock();
2115                 netif_set_real_num_rx_queues(adapter->netdev,
2116                                              adapter->num_rx_qs);
2117                 rtnl_unlock();
2118         }
2119
2120         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2121         for_all_rx_queues(adapter, rxo, i) {
2122                 rxo->adapter = adapter;
2123                 cq = &rxo->cq;
2124                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2125                                 sizeof(struct be_eth_rx_compl));
2126                 if (rc)
2127                         return rc;
2128
2129                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2130                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2131                 if (rc)
2132                         return rc;
2133         }
2134
2135         dev_info(&adapter->pdev->dev,
2136                  "created %d RSS queue(s) and 1 default RX queue\n",
2137                  adapter->num_rx_qs - 1);
2138         return 0;
2139 }
2140
2141 static irqreturn_t be_intx(int irq, void *dev)
2142 {
2143         struct be_eq_obj *eqo = dev;
2144         struct be_adapter *adapter = eqo->adapter;
2145         int num_evts = 0;
2146
2147         /* IRQ is not expected when NAPI is scheduled as the EQ
2148          * will not be armed.
2149          * But, this can happen on Lancer INTx where it takes
2150          * a while to de-assert INTx or in BE2 where occasionaly
2151          * an interrupt may be raised even when EQ is unarmed.
2152          * If NAPI is already scheduled, then counting & notifying
2153          * events will orphan them.
2154          */
2155         if (napi_schedule_prep(&eqo->napi)) {
2156                 num_evts = events_get(eqo);
2157                 __napi_schedule(&eqo->napi);
2158                 if (num_evts)
2159                         eqo->spurious_intr = 0;
2160         }
2161         be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2162
2163         /* Return IRQ_HANDLED only for the the first spurious intr
2164          * after a valid intr to stop the kernel from branding
2165          * this irq as a bad one!
2166          */
2167         if (num_evts || eqo->spurious_intr++ == 0)
2168                 return IRQ_HANDLED;
2169         else
2170                 return IRQ_NONE;
2171 }
2172
2173 static irqreturn_t be_msix(int irq, void *dev)
2174 {
2175         struct be_eq_obj *eqo = dev;
2176
2177         be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2178         napi_schedule(&eqo->napi);
2179         return IRQ_HANDLED;
2180 }
2181
2182 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2183 {
2184         return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
2185 }
2186
2187 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2188                         int budget)
2189 {
2190         struct be_adapter *adapter = rxo->adapter;
2191         struct be_queue_info *rx_cq = &rxo->cq;
2192         struct be_rx_compl_info *rxcp;
2193         u32 work_done;
2194
2195         for (work_done = 0; work_done < budget; work_done++) {
2196                 rxcp = be_rx_compl_get(rxo);
2197                 if (!rxcp)
2198                         break;
2199
2200                 /* Is it a flush compl that has no data */
2201                 if (unlikely(rxcp->num_rcvd == 0))
2202                         goto loop_continue;
2203
2204                 /* Discard compl with partial DMA Lancer B0 */
2205                 if (unlikely(!rxcp->pkt_size)) {
2206                         be_rx_compl_discard(rxo, rxcp);
2207                         goto loop_continue;
2208                 }
2209
2210                 /* On BE drop pkts that arrive due to imperfect filtering in
2211                  * promiscuous mode on some skews
2212                  */
2213                 if (unlikely(rxcp->port != adapter->port_num &&
2214                                 !lancer_chip(adapter))) {
2215                         be_rx_compl_discard(rxo, rxcp);
2216                         goto loop_continue;
2217                 }
2218
2219                 if (do_gro(rxcp))
2220                         be_rx_compl_process_gro(rxo, napi, rxcp);
2221                 else
2222                         be_rx_compl_process(rxo, rxcp);
2223 loop_continue:
2224                 be_rx_stats_update(rxo, rxcp);
2225         }
2226
2227         if (work_done) {
2228                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2229
2230                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2231                         be_post_rx_frags(rxo, GFP_ATOMIC);
2232         }
2233
2234         return work_done;
2235 }
2236
2237 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2238                           int budget, int idx)
2239 {
2240         struct be_eth_tx_compl *txcp;
2241         int num_wrbs = 0, work_done;
2242
2243         for (work_done = 0; work_done < budget; work_done++) {
2244                 txcp = be_tx_compl_get(&txo->cq);
2245                 if (!txcp)
2246                         break;
2247                 num_wrbs += be_tx_compl_process(adapter, txo,
2248                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2249                                         wrb_index, txcp));
2250         }
2251
2252         if (work_done) {
2253                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2254                 atomic_sub(num_wrbs, &txo->q.used);
2255
2256                 /* As Tx wrbs have been freed up, wake up netdev queue
2257                  * if it was stopped due to lack of tx wrbs.  */
2258                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2259                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2260                         netif_wake_subqueue(adapter->netdev, idx);
2261                 }
2262
2263                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2264                 tx_stats(txo)->tx_compl += work_done;
2265                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2266         }
2267         return (work_done < budget); /* Done */
2268 }
2269
2270 int be_poll(struct napi_struct *napi, int budget)
2271 {
2272         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2273         struct be_adapter *adapter = eqo->adapter;
2274         int max_work = 0, work, i, num_evts;
2275         bool tx_done;
2276
2277         num_evts = events_get(eqo);
2278
2279         /* Process all TXQs serviced by this EQ */
2280         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2281                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2282                                         eqo->tx_budget, i);
2283                 if (!tx_done)
2284                         max_work = budget;
2285         }
2286
2287         /* This loop will iterate twice for EQ0 in which
2288          * completions of the last RXQ (default one) are also processed
2289          * For other EQs the loop iterates only once
2290          */
2291         for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2292                 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2293                 max_work = max(work, max_work);
2294         }
2295
2296         if (is_mcc_eqo(eqo))
2297                 be_process_mcc(adapter);
2298
2299         if (max_work < budget) {
2300                 napi_complete(napi);
2301                 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2302         } else {
2303                 /* As we'll continue in polling mode, count and clear events */
2304                 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2305         }
2306         return max_work;
2307 }
2308
2309 void be_detect_error(struct be_adapter *adapter)
2310 {
2311         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2312         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2313         u32 i;
2314
2315         if (be_hw_error(adapter))
2316                 return;
2317
2318         if (lancer_chip(adapter)) {
2319                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2320                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2321                         sliport_err1 = ioread32(adapter->db +
2322                                         SLIPORT_ERROR1_OFFSET);
2323                         sliport_err2 = ioread32(adapter->db +
2324                                         SLIPORT_ERROR2_OFFSET);
2325                 }
2326         } else {
2327                 pci_read_config_dword(adapter->pdev,
2328                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2329                 pci_read_config_dword(adapter->pdev,
2330                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2331                 pci_read_config_dword(adapter->pdev,
2332                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2333                 pci_read_config_dword(adapter->pdev,
2334                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2335
2336                 ue_lo = (ue_lo & ~ue_lo_mask);
2337                 ue_hi = (ue_hi & ~ue_hi_mask);
2338         }
2339
2340         /* On certain platforms BE hardware can indicate spurious UEs.
2341          * Allow the h/w to stop working completely in case of a real UE.
2342          * Hence not setting the hw_error for UE detection.
2343          */
2344         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2345                 adapter->hw_error = true;
2346                 dev_err(&adapter->pdev->dev,
2347                         "Error detected in the card\n");
2348         }
2349
2350         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2351                 dev_err(&adapter->pdev->dev,
2352                         "ERR: sliport status 0x%x\n", sliport_status);
2353                 dev_err(&adapter->pdev->dev,
2354                         "ERR: sliport error1 0x%x\n", sliport_err1);
2355                 dev_err(&adapter->pdev->dev,
2356                         "ERR: sliport error2 0x%x\n", sliport_err2);
2357         }
2358
2359         if (ue_lo) {
2360                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2361                         if (ue_lo & 1)
2362                                 dev_err(&adapter->pdev->dev,
2363                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2364                 }
2365         }
2366
2367         if (ue_hi) {
2368                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2369                         if (ue_hi & 1)
2370                                 dev_err(&adapter->pdev->dev,
2371                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2372                 }
2373         }
2374
2375 }
2376
2377 static void be_msix_disable(struct be_adapter *adapter)
2378 {
2379         if (msix_enabled(adapter)) {
2380                 pci_disable_msix(adapter->pdev);
2381                 adapter->num_msix_vec = 0;
2382         }
2383 }
2384
2385 static uint be_num_rss_want(struct be_adapter *adapter)
2386 {
2387         u32 num = 0;
2388
2389         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2390             (lancer_chip(adapter) ||
2391              (!sriov_want(adapter) && be_physfn(adapter)))) {
2392                 num = adapter->max_rss_queues;
2393                 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2394         }
2395         return num;
2396 }
2397
2398 static int be_msix_enable(struct be_adapter *adapter)
2399 {
2400 #define BE_MIN_MSIX_VECTORS             1
2401         int i, status, num_vec, num_roce_vec = 0;
2402         struct device *dev = &adapter->pdev->dev;
2403
2404         /* If RSS queues are not used, need a vec for default RX Q */
2405         num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2406         if (be_roce_supported(adapter)) {
2407                 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2408                                         (num_online_cpus() + 1));
2409                 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2410                 num_vec += num_roce_vec;
2411                 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2412         }
2413         num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2414
2415         for (i = 0; i < num_vec; i++)
2416                 adapter->msix_entries[i].entry = i;
2417
2418         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2419         if (status == 0) {
2420                 goto done;
2421         } else if (status >= BE_MIN_MSIX_VECTORS) {
2422                 num_vec = status;
2423                 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2424                                          num_vec);
2425                 if (!status)
2426                         goto done;
2427         }
2428
2429         dev_warn(dev, "MSIx enable failed\n");
2430         /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2431         if (!be_physfn(adapter))
2432                 return status;
2433         return 0;
2434 done:
2435         if (be_roce_supported(adapter)) {
2436                 if (num_vec > num_roce_vec) {
2437                         adapter->num_msix_vec = num_vec - num_roce_vec;
2438                         adapter->num_msix_roce_vec =
2439                                 num_vec - adapter->num_msix_vec;
2440                 } else {
2441                         adapter->num_msix_vec = num_vec;
2442                         adapter->num_msix_roce_vec = 0;
2443                 }
2444         } else
2445                 adapter->num_msix_vec = num_vec;
2446         dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
2447         return 0;
2448 }
2449
2450 static inline int be_msix_vec_get(struct be_adapter *adapter,
2451                                 struct be_eq_obj *eqo)
2452 {
2453         return adapter->msix_entries[eqo->idx].vector;
2454 }
2455
2456 static int be_msix_register(struct be_adapter *adapter)
2457 {
2458         struct net_device *netdev = adapter->netdev;
2459         struct be_eq_obj *eqo;
2460         int status, i, vec;
2461
2462         for_all_evt_queues(adapter, eqo, i) {
2463                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2464                 vec = be_msix_vec_get(adapter, eqo);
2465                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2466                 if (status)
2467                         goto err_msix;
2468         }
2469
2470         return 0;
2471 err_msix:
2472         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2473                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2474         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2475                 status);
2476         be_msix_disable(adapter);
2477         return status;
2478 }
2479
2480 static int be_irq_register(struct be_adapter *adapter)
2481 {
2482         struct net_device *netdev = adapter->netdev;
2483         int status;
2484
2485         if (msix_enabled(adapter)) {
2486                 status = be_msix_register(adapter);
2487                 if (status == 0)
2488                         goto done;
2489                 /* INTx is not supported for VF */
2490                 if (!be_physfn(adapter))
2491                         return status;
2492         }
2493
2494         /* INTx: only the first EQ is used */
2495         netdev->irq = adapter->pdev->irq;
2496         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2497                              &adapter->eq_obj[0]);
2498         if (status) {
2499                 dev_err(&adapter->pdev->dev,
2500                         "INTx request IRQ failed - err %d\n", status);
2501                 return status;
2502         }
2503 done:
2504         adapter->isr_registered = true;
2505         return 0;
2506 }
2507
2508 static void be_irq_unregister(struct be_adapter *adapter)
2509 {
2510         struct net_device *netdev = adapter->netdev;
2511         struct be_eq_obj *eqo;
2512         int i;
2513
2514         if (!adapter->isr_registered)
2515                 return;
2516
2517         /* INTx */
2518         if (!msix_enabled(adapter)) {
2519                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2520                 goto done;
2521         }
2522
2523         /* MSIx */
2524         for_all_evt_queues(adapter, eqo, i)
2525                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2526
2527 done:
2528         adapter->isr_registered = false;
2529 }
2530
2531 static void be_rx_qs_destroy(struct be_adapter *adapter)
2532 {
2533         struct be_queue_info *q;
2534         struct be_rx_obj *rxo;
2535         int i;
2536
2537         for_all_rx_queues(adapter, rxo, i) {
2538                 q = &rxo->q;
2539                 if (q->created) {
2540                         be_cmd_rxq_destroy(adapter, q);
2541                         be_rx_cq_clean(rxo);
2542                 }
2543                 be_queue_free(adapter, q);
2544         }
2545 }
2546
2547 static int be_close(struct net_device *netdev)
2548 {
2549         struct be_adapter *adapter = netdev_priv(netdev);
2550         struct be_eq_obj *eqo;
2551         int i;
2552
2553         be_roce_dev_close(adapter);
2554
2555         if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2556                 for_all_evt_queues(adapter, eqo, i)
2557                         napi_disable(&eqo->napi);
2558                 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2559         }
2560
2561         be_async_mcc_disable(adapter);
2562
2563         /* Wait for all pending tx completions to arrive so that
2564          * all tx skbs are freed.
2565          */
2566         be_tx_compl_clean(adapter);
2567         netif_tx_disable(netdev);
2568
2569         be_rx_qs_destroy(adapter);
2570
2571         for_all_evt_queues(adapter, eqo, i) {
2572                 if (msix_enabled(adapter))
2573                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2574                 else
2575                         synchronize_irq(netdev->irq);
2576                 be_eq_clean(eqo);
2577         }
2578
2579         be_irq_unregister(adapter);
2580
2581         return 0;
2582 }
2583
2584 static int be_rx_qs_create(struct be_adapter *adapter)
2585 {
2586         struct be_rx_obj *rxo;
2587         int rc, i, j;
2588         u8 rsstable[128];
2589
2590         for_all_rx_queues(adapter, rxo, i) {
2591                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2592                                     sizeof(struct be_eth_rx_d));
2593                 if (rc)
2594                         return rc;
2595         }
2596
2597         /* The FW would like the default RXQ to be created first */
2598         rxo = default_rxo(adapter);
2599         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2600                                adapter->if_handle, false, &rxo->rss_id);
2601         if (rc)
2602                 return rc;
2603
2604         for_all_rss_queues(adapter, rxo, i) {
2605                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2606                                        rx_frag_size, adapter->if_handle,
2607                                        true, &rxo->rss_id);
2608                 if (rc)
2609                         return rc;
2610         }
2611
2612         if (be_multi_rxq(adapter)) {
2613                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2614                         for_all_rss_queues(adapter, rxo, i) {
2615                                 if ((j + i) >= 128)
2616                                         break;
2617                                 rsstable[j + i] = rxo->rss_id;
2618                         }
2619                 }
2620                 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2621                                         RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2622
2623                 if (!BEx_chip(adapter))
2624                         adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2625                                                 RSS_ENABLE_UDP_IPV6;
2626
2627                 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2628                                        128);
2629                 if (rc) {
2630                         adapter->rss_flags = 0;
2631                         return rc;
2632                 }
2633         }
2634
2635         /* First time posting */
2636         for_all_rx_queues(adapter, rxo, i)
2637                 be_post_rx_frags(rxo, GFP_KERNEL);
2638         return 0;
2639 }
2640
2641 static int be_open(struct net_device *netdev)
2642 {
2643         struct be_adapter *adapter = netdev_priv(netdev);
2644         struct be_eq_obj *eqo;
2645         struct be_rx_obj *rxo;
2646         struct be_tx_obj *txo;
2647         u8 link_status;
2648         int status, i;
2649
2650         status = be_rx_qs_create(adapter);
2651         if (status)
2652                 goto err;
2653
2654         status = be_irq_register(adapter);
2655         if (status)
2656                 goto err;
2657
2658         for_all_rx_queues(adapter, rxo, i)
2659                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2660
2661         for_all_tx_queues(adapter, txo, i)
2662                 be_cq_notify(adapter, txo->cq.id, true, 0);
2663
2664         be_async_mcc_enable(adapter);
2665
2666         for_all_evt_queues(adapter, eqo, i) {
2667                 napi_enable(&eqo->napi);
2668                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2669         }
2670         adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2671
2672         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2673         if (!status)
2674                 be_link_status_update(adapter, link_status);
2675
2676         netif_tx_start_all_queues(netdev);
2677         be_roce_dev_open(adapter);
2678         return 0;
2679 err:
2680         be_close(adapter->netdev);
2681         return -EIO;
2682 }
2683
2684 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2685 {
2686         struct be_dma_mem cmd;
2687         int status = 0;
2688         u8 mac[ETH_ALEN];
2689
2690         memset(mac, 0, ETH_ALEN);
2691
2692         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2693         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2694                                     GFP_KERNEL | __GFP_ZERO);
2695         if (cmd.va == NULL)
2696                 return -1;
2697
2698         if (enable) {
2699                 status = pci_write_config_dword(adapter->pdev,
2700                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2701                 if (status) {
2702                         dev_err(&adapter->pdev->dev,
2703                                 "Could not enable Wake-on-lan\n");
2704                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2705                                           cmd.dma);
2706                         return status;
2707                 }
2708                 status = be_cmd_enable_magic_wol(adapter,
2709                                 adapter->netdev->dev_addr, &cmd);
2710                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2711                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2712         } else {
2713                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2714                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2715                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2716         }
2717
2718         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2719         return status;
2720 }
2721
2722 /*
2723  * Generate a seed MAC address from the PF MAC Address using jhash.
2724  * MAC Address for VFs are assigned incrementally starting from the seed.
2725  * These addresses are programmed in the ASIC by the PF and the VF driver
2726  * queries for the MAC address during its probe.
2727  */
2728 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2729 {
2730         u32 vf;
2731         int status = 0;
2732         u8 mac[ETH_ALEN];
2733         struct be_vf_cfg *vf_cfg;
2734
2735         be_vf_eth_addr_generate(adapter, mac);
2736
2737         for_all_vfs(adapter, vf_cfg, vf) {
2738                 if (lancer_chip(adapter)) {
2739                         status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
2740                 } else {
2741                         status = be_cmd_pmac_add(adapter, mac,
2742                                                  vf_cfg->if_handle,
2743                                                  &vf_cfg->pmac_id, vf + 1);
2744                 }
2745
2746                 if (status)
2747                         dev_err(&adapter->pdev->dev,
2748                         "Mac address assignment failed for VF %d\n", vf);
2749                 else
2750                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2751
2752                 mac[5] += 1;
2753         }
2754         return status;
2755 }
2756
2757 static int be_vfs_mac_query(struct be_adapter *adapter)
2758 {
2759         int status, vf;
2760         u8 mac[ETH_ALEN];
2761         struct be_vf_cfg *vf_cfg;
2762         bool active = false;
2763
2764         for_all_vfs(adapter, vf_cfg, vf) {
2765                 be_cmd_get_mac_from_list(adapter, mac, &active,
2766                                          &vf_cfg->pmac_id, 0);
2767
2768                 status = be_cmd_mac_addr_query(adapter, mac, false,
2769                                                vf_cfg->if_handle, 0);
2770                 if (status)
2771                         return status;
2772                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2773         }
2774         return 0;
2775 }
2776
2777 static void be_vf_clear(struct be_adapter *adapter)
2778 {
2779         struct be_vf_cfg *vf_cfg;
2780         u32 vf;
2781
2782         if (pci_vfs_assigned(adapter->pdev)) {
2783                 dev_warn(&adapter->pdev->dev,
2784                          "VFs are assigned to VMs: not disabling VFs\n");
2785                 goto done;
2786         }
2787
2788         pci_disable_sriov(adapter->pdev);
2789
2790         for_all_vfs(adapter, vf_cfg, vf) {
2791                 if (lancer_chip(adapter))
2792                         be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2793                 else
2794                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2795                                         vf_cfg->pmac_id, vf + 1);
2796
2797                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2798         }
2799 done:
2800         kfree(adapter->vf_cfg);
2801         adapter->num_vfs = 0;
2802 }
2803
2804 static int be_clear(struct be_adapter *adapter)
2805 {
2806         int i = 1;
2807
2808         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2809                 cancel_delayed_work_sync(&adapter->work);
2810                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2811         }
2812
2813         if (sriov_enabled(adapter))
2814                 be_vf_clear(adapter);
2815
2816         for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2817                 be_cmd_pmac_del(adapter, adapter->if_handle,
2818                         adapter->pmac_id[i], 0);
2819
2820         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2821
2822         be_mcc_queues_destroy(adapter);
2823         be_rx_cqs_destroy(adapter);
2824         be_tx_queues_destroy(adapter);
2825         be_evt_queues_destroy(adapter);
2826
2827         kfree(adapter->pmac_id);
2828         adapter->pmac_id = NULL;
2829
2830         be_msix_disable(adapter);
2831         return 0;
2832 }
2833
2834 static int be_vfs_if_create(struct be_adapter *adapter)
2835 {
2836         struct be_vf_cfg *vf_cfg;
2837         u32 cap_flags, en_flags, vf;
2838         int status;
2839
2840         cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2841                     BE_IF_FLAGS_MULTICAST;
2842
2843         for_all_vfs(adapter, vf_cfg, vf) {
2844                 if (!BE3_chip(adapter))
2845                         be_cmd_get_profile_config(adapter, &cap_flags,
2846                                                   NULL, vf + 1);
2847
2848                 /* If a FW profile exists, then cap_flags are updated */
2849                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2850                            BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2851                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2852                                           &vf_cfg->if_handle, vf + 1);
2853                 if (status)
2854                         goto err;
2855         }
2856 err:
2857         return status;
2858 }
2859
2860 static int be_vf_setup_init(struct be_adapter *adapter)
2861 {
2862         struct be_vf_cfg *vf_cfg;
2863         int vf;
2864
2865         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2866                                   GFP_KERNEL);
2867         if (!adapter->vf_cfg)
2868                 return -ENOMEM;
2869
2870         for_all_vfs(adapter, vf_cfg, vf) {
2871                 vf_cfg->if_handle = -1;
2872                 vf_cfg->pmac_id = -1;
2873         }
2874         return 0;
2875 }
2876
2877 static int be_vf_setup(struct be_adapter *adapter)
2878 {
2879         struct be_vf_cfg *vf_cfg;
2880         u16 def_vlan, lnk_speed;
2881         int status, old_vfs, vf;
2882         struct device *dev = &adapter->pdev->dev;
2883         u32 privileges;
2884
2885         old_vfs = pci_num_vf(adapter->pdev);
2886         if (old_vfs) {
2887                 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2888                 if (old_vfs != num_vfs)
2889                         dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2890                 adapter->num_vfs = old_vfs;
2891         } else {
2892                 if (num_vfs > adapter->dev_num_vfs)
2893                         dev_info(dev, "Device supports %d VFs and not %d\n",
2894                                  adapter->dev_num_vfs, num_vfs);
2895                 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
2896                 if (!adapter->num_vfs)
2897                         return 0;
2898         }
2899
2900         status = be_vf_setup_init(adapter);
2901         if (status)
2902                 goto err;
2903
2904         if (old_vfs) {
2905                 for_all_vfs(adapter, vf_cfg, vf) {
2906                         status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2907                         if (status)
2908                                 goto err;
2909                 }
2910         } else {
2911                 status = be_vfs_if_create(adapter);
2912                 if (status)
2913                         goto err;
2914         }
2915
2916         if (old_vfs) {
2917                 status = be_vfs_mac_query(adapter);
2918                 if (status)
2919                         goto err;
2920         } else {
2921                 status = be_vf_eth_addr_config(adapter);
2922                 if (status)
2923                         goto err;
2924         }
2925
2926         for_all_vfs(adapter, vf_cfg, vf) {
2927                 /* Allow VFs to programs MAC/VLAN filters */
2928                 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
2929                 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
2930                         status = be_cmd_set_fn_privileges(adapter,
2931                                                           privileges |
2932                                                           BE_PRIV_FILTMGMT,
2933                                                           vf + 1);
2934                         if (!status)
2935                                 dev_info(dev, "VF%d has FILTMGMT privilege\n",
2936                                          vf);
2937                 }
2938
2939                 /* BE3 FW, by default, caps VF TX-rate to 100mbps.
2940                  * Allow full available bandwidth
2941                  */
2942                 if (BE3_chip(adapter) && !old_vfs)
2943                         be_cmd_set_qos(adapter, 1000, vf+1);
2944
2945                 status = be_cmd_link_status_query(adapter, &lnk_speed,
2946                                                   NULL, vf + 1);
2947                 if (!status)
2948                         vf_cfg->tx_rate = lnk_speed;
2949
2950                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2951                                                vf + 1, vf_cfg->if_handle);
2952                 if (status)
2953                         goto err;
2954                 vf_cfg->def_vid = def_vlan;
2955
2956                 be_cmd_enable_vf(adapter, vf + 1);
2957         }
2958
2959         if (!old_vfs) {
2960                 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2961                 if (status) {
2962                         dev_err(dev, "SRIOV enable failed\n");
2963                         adapter->num_vfs = 0;
2964                         goto err;
2965                 }
2966         }
2967         return 0;
2968 err:
2969         dev_err(dev, "VF setup failed\n");
2970         be_vf_clear(adapter);
2971         return status;
2972 }
2973
2974 static void be_setup_init(struct be_adapter *adapter)
2975 {
2976         adapter->vlan_prio_bmap = 0xff;
2977         adapter->phy.link_speed = -1;
2978         adapter->if_handle = -1;
2979         adapter->be3_native = false;
2980         adapter->promiscuous = false;
2981         if (be_physfn(adapter))
2982                 adapter->cmd_privileges = MAX_PRIVILEGES;
2983         else
2984                 adapter->cmd_privileges = MIN_PRIVILEGES;
2985 }
2986
2987 static void be_get_resources(struct be_adapter *adapter)
2988 {
2989         u16 dev_num_vfs;
2990         int pos, status;
2991         bool profile_present = false;
2992         u16 txq_count = 0;
2993
2994         if (!BEx_chip(adapter)) {
2995                 status = be_cmd_get_func_config(adapter);
2996                 if (!status)
2997                         profile_present = true;
2998         } else if (BE3_chip(adapter) && be_physfn(adapter)) {
2999                 be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
3000         }
3001
3002         if (profile_present) {
3003                 /* Sanity fixes for Lancer */
3004                 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
3005                                               BE_UC_PMAC_COUNT);
3006                 adapter->max_vlans = min_t(u16, adapter->max_vlans,
3007                                            BE_NUM_VLANS_SUPPORTED);
3008                 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
3009                                                BE_MAX_MC);
3010                 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3011                                                MAX_TX_QS);
3012                 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
3013                                                 BE3_MAX_RSS_QS);
3014                 adapter->max_event_queues = min_t(u16,
3015                                                   adapter->max_event_queues,
3016                                                   BE3_MAX_RSS_QS);
3017
3018                 if (adapter->max_rss_queues &&
3019                     adapter->max_rss_queues == adapter->max_rx_queues)
3020                         adapter->max_rss_queues -= 1;
3021
3022                 if (adapter->max_event_queues < adapter->max_rss_queues)
3023                         adapter->max_rss_queues = adapter->max_event_queues;
3024
3025         } else {
3026                 if (be_physfn(adapter))
3027                         adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3028                 else
3029                         adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3030
3031                 if (adapter->function_mode & FLEX10_MODE)
3032                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3033                 else
3034                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3035
3036                 adapter->max_mcast_mac = BE_MAX_MC;
3037                 adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
3038                 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3039                                                MAX_TX_QS);
3040                 adapter->max_rss_queues = (adapter->be3_native) ?
3041                                            BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3042                 adapter->max_event_queues = BE3_MAX_RSS_QS;
3043
3044                 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
3045                                         BE_IF_FLAGS_BROADCAST |
3046                                         BE_IF_FLAGS_MULTICAST |
3047                                         BE_IF_FLAGS_PASS_L3L4_ERRORS |
3048                                         BE_IF_FLAGS_MCAST_PROMISCUOUS |
3049                                         BE_IF_FLAGS_VLAN_PROMISCUOUS |
3050                                         BE_IF_FLAGS_PROMISCUOUS;
3051
3052                 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3053                         adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
3054         }
3055
3056         pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
3057         if (pos) {
3058                 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
3059                                      &dev_num_vfs);
3060                 if (BE3_chip(adapter))
3061                         dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
3062                 adapter->dev_num_vfs = dev_num_vfs;
3063         }
3064 }
3065
3066 /* Routine to query per function resource limits */
3067 static int be_get_config(struct be_adapter *adapter)
3068 {
3069         int status;
3070
3071         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3072                                      &adapter->function_mode,
3073                                      &adapter->function_caps,
3074                                      &adapter->asic_rev);
3075         if (status)
3076                 goto err;
3077
3078         be_get_resources(adapter);
3079
3080         /* primary mac needs 1 pmac entry */
3081         adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3082                                    sizeof(u32), GFP_KERNEL);
3083         if (!adapter->pmac_id) {
3084                 status = -ENOMEM;
3085                 goto err;
3086         }
3087
3088 err:
3089         return status;
3090 }
3091
3092 static int be_mac_setup(struct be_adapter *adapter)
3093 {
3094         u8 mac[ETH_ALEN];
3095         int status;
3096
3097         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3098                 status = be_cmd_get_perm_mac(adapter, mac);
3099                 if (status)
3100                         return status;
3101
3102                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3103                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3104         } else {
3105                 /* Maybe the HW was reset; dev_addr must be re-programmed */
3106                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3107         }
3108
3109         /* On BE3 VFs this cmd may fail due to lack of privilege.
3110          * Ignore the failure as in this case pmac_id is fetched
3111          * in the IFACE_CREATE cmd.
3112          */
3113         be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3114                         &adapter->pmac_id[0], 0);
3115         return 0;
3116 }
3117
3118 static int be_setup(struct be_adapter *adapter)
3119 {
3120         struct device *dev = &adapter->pdev->dev;
3121         u32 en_flags;
3122         u32 tx_fc, rx_fc;
3123         int status;
3124
3125         be_setup_init(adapter);
3126
3127         if (!lancer_chip(adapter))
3128                 be_cmd_req_native_mode(adapter);
3129
3130         status = be_get_config(adapter);
3131         if (status)
3132                 goto err;
3133
3134         status = be_msix_enable(adapter);
3135         if (status)
3136                 goto err;
3137
3138         status = be_evt_queues_create(adapter);
3139         if (status)
3140                 goto err;
3141
3142         status = be_tx_cqs_create(adapter);
3143         if (status)
3144                 goto err;
3145
3146         status = be_rx_cqs_create(adapter);
3147         if (status)
3148                 goto err;
3149
3150         status = be_mcc_queues_create(adapter);
3151         if (status)
3152                 goto err;
3153
3154         be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3155         /* In UMC mode FW does not return right privileges.
3156          * Override with correct privilege equivalent to PF.
3157          */
3158         if (be_is_mc(adapter))
3159                 adapter->cmd_privileges = MAX_PRIVILEGES;
3160
3161         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3162                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3163         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3164                 en_flags |= BE_IF_FLAGS_RSS;
3165         en_flags = en_flags & adapter->if_cap_flags;
3166         status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
3167                                   &adapter->if_handle, 0);
3168         if (status != 0)
3169                 goto err;
3170
3171         status = be_mac_setup(adapter);
3172         if (status)
3173                 goto err;
3174
3175         status = be_tx_qs_create(adapter);
3176         if (status)
3177                 goto err;
3178
3179         be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
3180
3181         if (adapter->vlans_added)
3182                 be_vid_config(adapter);
3183
3184         be_set_rx_mode(adapter->netdev);
3185
3186         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3187
3188         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3189                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3190                                         adapter->rx_fc);
3191
3192         if (be_physfn(adapter)) {
3193                 if (adapter->dev_num_vfs)
3194                         be_vf_setup(adapter);
3195                 else
3196                         dev_warn(dev, "device doesn't support SRIOV\n");
3197         }
3198
3199         status = be_cmd_get_phy_info(adapter);
3200         if (!status && be_pause_supported(adapter))
3201                 adapter->phy.fc_autoneg = 1;
3202
3203         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3204         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3205         return 0;
3206 err:
3207         be_clear(adapter);
3208         return status;
3209 }
3210
3211 #ifdef CONFIG_NET_POLL_CONTROLLER
3212 static void be_netpoll(struct net_device *netdev)
3213 {
3214         struct be_adapter *adapter = netdev_priv(netdev);
3215         struct be_eq_obj *eqo;
3216         int i;
3217
3218         for_all_evt_queues(adapter, eqo, i) {
3219                 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3220                 napi_schedule(&eqo->napi);
3221         }
3222
3223         return;
3224 }
3225 #endif
3226
3227 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
3228 char flash_cookie[2][16] =      {"*** SE FLAS", "H DIRECTORY *** "};
3229
3230 static bool be_flash_redboot(struct be_adapter *adapter,
3231                         const u8 *p, u32 img_start, int image_size,
3232                         int hdr_size)
3233 {
3234         u32 crc_offset;
3235         u8 flashed_crc[4];
3236         int status;
3237
3238         crc_offset = hdr_size + img_start + image_size - 4;
3239
3240         p += crc_offset;
3241
3242         status = be_cmd_get_flash_crc(adapter, flashed_crc,
3243                         (image_size - 4));
3244         if (status) {
3245                 dev_err(&adapter->pdev->dev,
3246                 "could not get crc from flash, not flashing redboot\n");
3247                 return false;
3248         }
3249
3250         /*update redboot only if crc does not match*/
3251         if (!memcmp(flashed_crc, p, 4))
3252                 return false;
3253         else
3254                 return true;
3255 }
3256
3257 static bool phy_flashing_required(struct be_adapter *adapter)
3258 {
3259         return (adapter->phy.phy_type == TN_8022 &&
3260                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3261 }
3262
3263 static bool is_comp_in_ufi(struct be_adapter *adapter,
3264                            struct flash_section_info *fsec, int type)
3265 {
3266         int i = 0, img_type = 0;
3267         struct flash_section_info_g2 *fsec_g2 = NULL;
3268
3269         if (BE2_chip(adapter))
3270                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3271
3272         for (i = 0; i < MAX_FLASH_COMP; i++) {
3273                 if (fsec_g2)
3274                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3275                 else
3276                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3277
3278                 if (img_type == type)
3279                         return true;
3280         }
3281         return false;
3282
3283 }
3284
3285 struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3286                                          int header_size,
3287                                          const struct firmware *fw)
3288 {
3289         struct flash_section_info *fsec = NULL;
3290         const u8 *p = fw->data;
3291
3292         p += header_size;
3293         while (p < (fw->data + fw->size)) {
3294                 fsec = (struct flash_section_info *)p;
3295                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3296                         return fsec;
3297                 p += 32;
3298         }
3299         return NULL;
3300 }
3301
3302 static int be_flash(struct be_adapter *adapter, const u8 *img,
3303                 struct be_dma_mem *flash_cmd, int optype, int img_size)
3304 {
3305         u32 total_bytes = 0, flash_op, num_bytes = 0;
3306         int status = 0;
3307         struct be_cmd_write_flashrom *req = flash_cmd->va;
3308
3309         total_bytes = img_size;
3310         while (total_bytes) {
3311                 num_bytes = min_t(u32, 32*1024, total_bytes);
3312
3313                 total_bytes -= num_bytes;
3314
3315                 if (!total_bytes) {
3316                         if (optype == OPTYPE_PHY_FW)
3317                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3318                         else
3319                                 flash_op = FLASHROM_OPER_FLASH;
3320                 } else {
3321                         if (optype == OPTYPE_PHY_FW)
3322                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3323                         else
3324                                 flash_op = FLASHROM_OPER_SAVE;
3325                 }
3326
3327                 memcpy(req->data_buf, img, num_bytes);
3328                 img += num_bytes;
3329                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3330                                                 flash_op, num_bytes);
3331                 if (status) {
3332                         if (status == ILLEGAL_IOCTL_REQ &&
3333                             optype == OPTYPE_PHY_FW)
3334                                 break;
3335                         dev_err(&adapter->pdev->dev,
3336                                 "cmd to write to flash rom failed.\n");
3337                         return status;
3338                 }
3339         }
3340         return 0;
3341 }
3342
3343 /* For BE2, BE3 and BE3-R */
3344 static int be_flash_BEx(struct be_adapter *adapter,
3345                          const struct firmware *fw,
3346                          struct be_dma_mem *flash_cmd,
3347                          int num_of_images)
3348
3349 {
3350         int status = 0, i, filehdr_size = 0;
3351         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3352         const u8 *p = fw->data;
3353         const struct flash_comp *pflashcomp;
3354         int num_comp, redboot;
3355         struct flash_section_info *fsec = NULL;
3356
3357         struct flash_comp gen3_flash_types[] = {
3358                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3359                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3360                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3361                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3362                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3363                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3364                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3365                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3366                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3367                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3368                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3369                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3370                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3371                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3372                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3373                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3374                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3375                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3376                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3377                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3378         };
3379
3380         struct flash_comp gen2_flash_types[] = {
3381                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3382                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3383                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3384                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3385                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3386                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3387                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3388                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3389                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3390                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3391                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3392                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3393                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3394                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3395                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3396                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3397         };
3398
3399         if (BE3_chip(adapter)) {
3400                 pflashcomp = gen3_flash_types;
3401                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3402                 num_comp = ARRAY_SIZE(gen3_flash_types);
3403         } else {
3404                 pflashcomp = gen2_flash_types;
3405                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3406                 num_comp = ARRAY_SIZE(gen2_flash_types);
3407         }
3408
3409         /* Get flash section info*/
3410         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3411         if (!fsec) {
3412                 dev_err(&adapter->pdev->dev,
3413                         "Invalid Cookie. UFI corrupted ?\n");
3414                 return -1;
3415         }
3416         for (i = 0; i < num_comp; i++) {
3417                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3418                         continue;
3419
3420                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3421                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3422                         continue;
3423
3424                 if (pflashcomp[i].optype == OPTYPE_PHY_FW  &&
3425                     !phy_flashing_required(adapter))
3426                                 continue;
3427
3428                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3429                         redboot = be_flash_redboot(adapter, fw->data,
3430                                 pflashcomp[i].offset, pflashcomp[i].size,
3431                                 filehdr_size + img_hdrs_size);
3432                         if (!redboot)
3433                                 continue;
3434                 }
3435
3436                 p = fw->data;
3437                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3438                 if (p + pflashcomp[i].size > fw->data + fw->size)
3439                         return -1;
3440
3441                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3442                                         pflashcomp[i].size);
3443                 if (status) {
3444                         dev_err(&adapter->pdev->dev,
3445                                 "Flashing section type %d failed.\n",
3446                                 pflashcomp[i].img_type);
3447                         return status;
3448                 }
3449         }
3450         return 0;
3451 }
3452
3453 static int be_flash_skyhawk(struct be_adapter *adapter,
3454                 const struct firmware *fw,
3455                 struct be_dma_mem *flash_cmd, int num_of_images)
3456 {
3457         int status = 0, i, filehdr_size = 0;
3458         int img_offset, img_size, img_optype, redboot;
3459         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3460         const u8 *p = fw->data;
3461         struct flash_section_info *fsec = NULL;
3462
3463         filehdr_size = sizeof(struct flash_file_hdr_g3);
3464         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3465         if (!fsec) {
3466                 dev_err(&adapter->pdev->dev,
3467                         "Invalid Cookie. UFI corrupted ?\n");
3468                 return -1;
3469         }
3470
3471         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3472                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3473                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3474
3475                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3476                 case IMAGE_FIRMWARE_iSCSI:
3477                         img_optype = OPTYPE_ISCSI_ACTIVE;
3478                         break;
3479                 case IMAGE_BOOT_CODE:
3480                         img_optype = OPTYPE_REDBOOT;
3481                         break;
3482                 case IMAGE_OPTION_ROM_ISCSI:
3483                         img_optype = OPTYPE_BIOS;
3484                         break;
3485                 case IMAGE_OPTION_ROM_PXE:
3486                         img_optype = OPTYPE_PXE_BIOS;
3487                         break;
3488                 case IMAGE_OPTION_ROM_FCoE:
3489                         img_optype = OPTYPE_FCOE_BIOS;
3490                         break;
3491                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3492                         img_optype = OPTYPE_ISCSI_BACKUP;
3493                         break;
3494                 case IMAGE_NCSI:
3495                         img_optype = OPTYPE_NCSI_FW;
3496                         break;
3497                 default:
3498                         continue;
3499                 }
3500
3501                 if (img_optype == OPTYPE_REDBOOT) {
3502                         redboot = be_flash_redboot(adapter, fw->data,
3503                                         img_offset, img_size,
3504                                         filehdr_size + img_hdrs_size);
3505                         if (!redboot)
3506                                 continue;
3507                 }
3508
3509                 p = fw->data;
3510                 p += filehdr_size + img_offset + img_hdrs_size;
3511                 if (p + img_size > fw->data + fw->size)
3512                         return -1;
3513
3514                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3515                 if (status) {
3516                         dev_err(&adapter->pdev->dev,
3517                                 "Flashing section type %d failed.\n",
3518                                 fsec->fsec_entry[i].type);
3519                         return status;
3520                 }
3521         }
3522         return 0;
3523 }
3524
3525 static int lancer_fw_download(struct be_adapter *adapter,
3526                                 const struct firmware *fw)
3527 {
3528 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3529 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3530         struct be_dma_mem flash_cmd;
3531         const u8 *data_ptr = NULL;
3532         u8 *dest_image_ptr = NULL;
3533         size_t image_size = 0;
3534         u32 chunk_size = 0;
3535         u32 data_written = 0;
3536         u32 offset = 0;
3537         int status = 0;
3538         u8 add_status = 0;
3539         u8 change_status;
3540
3541         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3542                 dev_err(&adapter->pdev->dev,
3543                         "FW Image not properly aligned. "
3544                         "Length must be 4 byte aligned.\n");
3545                 status = -EINVAL;
3546                 goto lancer_fw_exit;
3547         }
3548
3549         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3550                                 + LANCER_FW_DOWNLOAD_CHUNK;
3551         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3552                                           &flash_cmd.dma, GFP_KERNEL);
3553         if (!flash_cmd.va) {
3554                 status = -ENOMEM;
3555                 goto lancer_fw_exit;
3556         }
3557
3558         dest_image_ptr = flash_cmd.va +
3559                                 sizeof(struct lancer_cmd_req_write_object);
3560         image_size = fw->size;
3561         data_ptr = fw->data;
3562
3563         while (image_size) {
3564                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3565
3566                 /* Copy the image chunk content. */
3567                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3568
3569                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3570                                                  chunk_size, offset,
3571                                                  LANCER_FW_DOWNLOAD_LOCATION,
3572                                                  &data_written, &change_status,
3573                                                  &add_status);
3574                 if (status)
3575                         break;
3576
3577                 offset += data_written;
3578                 data_ptr += data_written;
3579                 image_size -= data_written;
3580         }
3581
3582         if (!status) {
3583                 /* Commit the FW written */
3584                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3585                                                  0, offset,
3586                                                  LANCER_FW_DOWNLOAD_LOCATION,
3587                                                  &data_written, &change_status,
3588                                                  &add_status);
3589         }
3590
3591         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3592                                 flash_cmd.dma);
3593         if (status) {
3594                 dev_err(&adapter->pdev->dev,
3595                         "Firmware load error. "
3596                         "Status code: 0x%x Additional Status: 0x%x\n",
3597                         status, add_status);
3598                 goto lancer_fw_exit;
3599         }
3600
3601         if (change_status == LANCER_FW_RESET_NEEDED) {
3602                 status = lancer_physdev_ctrl(adapter,
3603                                              PHYSDEV_CONTROL_FW_RESET_MASK);
3604                 if (status) {
3605                         dev_err(&adapter->pdev->dev,
3606                                 "Adapter busy for FW reset.\n"
3607                                 "New FW will not be active.\n");
3608                         goto lancer_fw_exit;
3609                 }
3610         } else if (change_status != LANCER_NO_RESET_NEEDED) {
3611                         dev_err(&adapter->pdev->dev,
3612                                 "System reboot required for new FW"
3613                                 " to be active\n");
3614         }
3615
3616         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3617 lancer_fw_exit:
3618         return status;
3619 }
3620
3621 #define UFI_TYPE2               2
3622 #define UFI_TYPE3               3
3623 #define UFI_TYPE3R              10
3624 #define UFI_TYPE4               4
3625 static int be_get_ufi_type(struct be_adapter *adapter,
3626                            struct flash_file_hdr_g3 *fhdr)
3627 {
3628         if (fhdr == NULL)
3629                 goto be_get_ufi_exit;
3630
3631         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3632                 return UFI_TYPE4;
3633         else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3634                 if (fhdr->asic_type_rev == 0x10)
3635                         return UFI_TYPE3R;
3636                 else
3637                         return UFI_TYPE3;
3638         } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3639                 return UFI_TYPE2;
3640
3641 be_get_ufi_exit:
3642         dev_err(&adapter->pdev->dev,
3643                 "UFI and Interface are not compatible for flashing\n");
3644         return -1;
3645 }
3646
3647 static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3648 {
3649         struct flash_file_hdr_g3 *fhdr3;
3650         struct image_hdr *img_hdr_ptr = NULL;
3651         struct be_dma_mem flash_cmd;
3652         const u8 *p;
3653         int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3654
3655         flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3656         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3657                                           &flash_cmd.dma, GFP_KERNEL);
3658         if (!flash_cmd.va) {
3659                 status = -ENOMEM;
3660                 goto be_fw_exit;
3661         }
3662
3663         p = fw->data;
3664         fhdr3 = (struct flash_file_hdr_g3 *)p;
3665
3666         ufi_type = be_get_ufi_type(adapter, fhdr3);
3667
3668         num_imgs = le32_to_cpu(fhdr3->num_imgs);
3669         for (i = 0; i < num_imgs; i++) {
3670                 img_hdr_ptr = (struct image_hdr *)(fw->data +
3671                                 (sizeof(struct flash_file_hdr_g3) +
3672                                  i * sizeof(struct image_hdr)));
3673                 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3674                         switch (ufi_type) {
3675                         case UFI_TYPE4:
3676                                 status = be_flash_skyhawk(adapter, fw,
3677                                                         &flash_cmd, num_imgs);
3678                                 break;
3679                         case UFI_TYPE3R:
3680                                 status = be_flash_BEx(adapter, fw, &flash_cmd,
3681                                                       num_imgs);
3682                                 break;
3683                         case UFI_TYPE3:
3684                                 /* Do not flash this ufi on BE3-R cards */
3685                                 if (adapter->asic_rev < 0x10)
3686                                         status = be_flash_BEx(adapter, fw,
3687                                                               &flash_cmd,
3688                                                               num_imgs);
3689                                 else {
3690                                         status = -1;
3691                                         dev_err(&adapter->pdev->dev,
3692                                                 "Can't load BE3 UFI on BE3R\n");
3693                                 }
3694                         }
3695                 }
3696         }
3697
3698         if (ufi_type == UFI_TYPE2)
3699                 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3700         else if (ufi_type == -1)
3701                 status = -1;
3702
3703         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3704                           flash_cmd.dma);
3705         if (status) {
3706                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3707                 goto be_fw_exit;
3708         }
3709
3710         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3711
3712 be_fw_exit:
3713         return status;
3714 }
3715
3716 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3717 {
3718         const struct firmware *fw;
3719         int status;
3720
3721         if (!netif_running(adapter->netdev)) {
3722                 dev_err(&adapter->pdev->dev,
3723                         "Firmware load not allowed (interface is down)\n");
3724                 return -1;
3725         }
3726
3727         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3728         if (status)
3729                 goto fw_exit;
3730
3731         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3732
3733         if (lancer_chip(adapter))
3734                 status = lancer_fw_download(adapter, fw);
3735         else
3736                 status = be_fw_download(adapter, fw);
3737
3738         if (!status)
3739                 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3740                                   adapter->fw_on_flash);
3741
3742 fw_exit:
3743         release_firmware(fw);
3744         return status;
3745 }
3746
3747 static const struct net_device_ops be_netdev_ops = {
3748         .ndo_open               = be_open,
3749         .ndo_stop               = be_close,
3750         .ndo_start_xmit         = be_xmit,
3751         .ndo_set_rx_mode        = be_set_rx_mode,
3752         .ndo_set_mac_address    = be_mac_addr_set,
3753         .ndo_change_mtu         = be_change_mtu,
3754         .ndo_get_stats64        = be_get_stats64,
3755         .ndo_validate_addr      = eth_validate_addr,
3756         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3757         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3758         .ndo_set_vf_mac         = be_set_vf_mac,
3759         .ndo_set_vf_vlan        = be_set_vf_vlan,
3760         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3761         .ndo_get_vf_config      = be_get_vf_config,
3762 #ifdef CONFIG_NET_POLL_CONTROLLER
3763         .ndo_poll_controller    = be_netpoll,
3764 #endif
3765 };
3766
3767 static void be_netdev_init(struct net_device *netdev)
3768 {
3769         struct be_adapter *adapter = netdev_priv(netdev);
3770         struct be_eq_obj *eqo;
3771         int i;
3772
3773         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3774                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3775                 NETIF_F_HW_VLAN_CTAG_TX;
3776         if (be_multi_rxq(adapter))
3777                 netdev->hw_features |= NETIF_F_RXHASH;
3778
3779         netdev->features |= netdev->hw_features |
3780                 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
3781
3782         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3783                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3784
3785         netdev->priv_flags |= IFF_UNICAST_FLT;
3786
3787         netdev->flags |= IFF_MULTICAST;
3788
3789         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3790
3791         netdev->netdev_ops = &be_netdev_ops;
3792
3793         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3794
3795         for_all_evt_queues(adapter, eqo, i)
3796                 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3797 }
3798
3799 static void be_unmap_pci_bars(struct be_adapter *adapter)
3800 {
3801         if (adapter->csr)
3802                 pci_iounmap(adapter->pdev, adapter->csr);
3803         if (adapter->db)
3804                 pci_iounmap(adapter->pdev, adapter->db);
3805 }
3806
3807 static int db_bar(struct be_adapter *adapter)
3808 {
3809         if (lancer_chip(adapter) || !be_physfn(adapter))
3810                 return 0;
3811         else
3812                 return 4;
3813 }
3814
3815 static int be_roce_map_pci_bars(struct be_adapter *adapter)
3816 {
3817         if (skyhawk_chip(adapter)) {
3818                 adapter->roce_db.size = 4096;
3819                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3820                                                               db_bar(adapter));
3821                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3822                                                                db_bar(adapter));
3823         }
3824         return 0;
3825 }
3826
3827 static int be_map_pci_bars(struct be_adapter *adapter)
3828 {
3829         u8 __iomem *addr;
3830         u32 sli_intf;
3831
3832         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3833         adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3834                                 SLI_INTF_IF_TYPE_SHIFT;
3835
3836         if (BEx_chip(adapter) && be_physfn(adapter)) {
3837                 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3838                 if (adapter->csr == NULL)
3839                         return -ENOMEM;
3840         }
3841
3842         addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
3843         if (addr == NULL)
3844                 goto pci_map_err;
3845         adapter->db = addr;
3846
3847         be_roce_map_pci_bars(adapter);
3848         return 0;
3849
3850 pci_map_err:
3851         be_unmap_pci_bars(adapter);
3852         return -ENOMEM;
3853 }
3854
3855 static void be_ctrl_cleanup(struct be_adapter *adapter)
3856 {
3857         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3858
3859         be_unmap_pci_bars(adapter);
3860
3861         if (mem->va)
3862                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3863                                   mem->dma);
3864
3865         mem = &adapter->rx_filter;
3866         if (mem->va)
3867                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3868                                   mem->dma);
3869 }
3870
3871 static int be_ctrl_init(struct be_adapter *adapter)
3872 {
3873         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3874         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3875         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3876         u32 sli_intf;
3877         int status;
3878
3879         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3880         adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3881                                  SLI_INTF_FAMILY_SHIFT;
3882         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3883
3884         status = be_map_pci_bars(adapter);
3885         if (status)
3886                 goto done;
3887
3888         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3889         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3890                                                 mbox_mem_alloc->size,
3891                                                 &mbox_mem_alloc->dma,
3892                                                 GFP_KERNEL);
3893         if (!mbox_mem_alloc->va) {
3894                 status = -ENOMEM;
3895                 goto unmap_pci_bars;
3896         }
3897         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3898         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3899         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3900         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3901
3902         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3903         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3904                                            &rx_filter->dma,
3905                                            GFP_KERNEL | __GFP_ZERO);
3906         if (rx_filter->va == NULL) {
3907                 status = -ENOMEM;
3908                 goto free_mbox;
3909         }
3910
3911         mutex_init(&adapter->mbox_lock);
3912         spin_lock_init(&adapter->mcc_lock);
3913         spin_lock_init(&adapter->mcc_cq_lock);
3914
3915         init_completion(&adapter->flash_compl);
3916         pci_save_state(adapter->pdev);
3917         return 0;
3918
3919 free_mbox:
3920         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3921                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3922
3923 unmap_pci_bars:
3924         be_unmap_pci_bars(adapter);
3925
3926 done:
3927         return status;
3928 }
3929
3930 static void be_stats_cleanup(struct be_adapter *adapter)
3931 {
3932         struct be_dma_mem *cmd = &adapter->stats_cmd;
3933
3934         if (cmd->va)
3935                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3936                                   cmd->va, cmd->dma);
3937 }
3938
3939 static int be_stats_init(struct be_adapter *adapter)
3940 {
3941         struct be_dma_mem *cmd = &adapter->stats_cmd;
3942
3943         if (lancer_chip(adapter))
3944                 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3945         else if (BE2_chip(adapter))
3946                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3947         else
3948                 /* BE3 and Skyhawk */
3949                 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3950
3951         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3952                                      GFP_KERNEL | __GFP_ZERO);
3953         if (cmd->va == NULL)
3954                 return -1;
3955         return 0;
3956 }
3957
3958 static void be_remove(struct pci_dev *pdev)
3959 {
3960         struct be_adapter *adapter = pci_get_drvdata(pdev);
3961
3962         if (!adapter)
3963                 return;
3964
3965         be_roce_dev_remove(adapter);
3966         be_intr_set(adapter, false);
3967
3968         cancel_delayed_work_sync(&adapter->func_recovery_work);
3969
3970         unregister_netdev(adapter->netdev);
3971
3972         be_clear(adapter);
3973
3974         /* tell fw we're done with firing cmds */
3975         be_cmd_fw_clean(adapter);
3976
3977         be_stats_cleanup(adapter);
3978
3979         be_ctrl_cleanup(adapter);
3980
3981         pci_disable_pcie_error_reporting(pdev);
3982
3983         pci_set_drvdata(pdev, NULL);
3984         pci_release_regions(pdev);
3985         pci_disable_device(pdev);
3986
3987         free_netdev(adapter->netdev);
3988 }
3989
3990 bool be_is_wol_supported(struct be_adapter *adapter)
3991 {
3992         return ((adapter->wol_cap & BE_WOL_CAP) &&
3993                 !be_is_wol_excluded(adapter)) ? true : false;
3994 }
3995
3996 u32 be_get_fw_log_level(struct be_adapter *adapter)
3997 {
3998         struct be_dma_mem extfat_cmd;
3999         struct be_fat_conf_params *cfgs;
4000         int status;
4001         u32 level = 0;
4002         int j;
4003
4004         if (lancer_chip(adapter))
4005                 return 0;
4006
4007         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4008         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4009         extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
4010                                              &extfat_cmd.dma);
4011
4012         if (!extfat_cmd.va) {
4013                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4014                         __func__);
4015                 goto err;
4016         }
4017
4018         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4019         if (!status) {
4020                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4021                                                 sizeof(struct be_cmd_resp_hdr));
4022                 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
4023                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4024                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4025                 }
4026         }
4027         pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
4028                             extfat_cmd.dma);
4029 err:
4030         return level;
4031 }
4032
4033 static int be_get_initial_config(struct be_adapter *adapter)
4034 {
4035         int status;
4036         u32 level;
4037
4038         status = be_cmd_get_cntl_attributes(adapter);
4039         if (status)
4040                 return status;
4041
4042         status = be_cmd_get_acpi_wol_cap(adapter);
4043         if (status) {
4044                 /* in case of a failure to get wol capabillities
4045                  * check the exclusion list to determine WOL capability */
4046                 if (!be_is_wol_excluded(adapter))
4047                         adapter->wol_cap |= BE_WOL_CAP;
4048         }
4049
4050         if (be_is_wol_supported(adapter))
4051                 adapter->wol = true;
4052
4053         /* Must be a power of 2 or else MODULO will BUG_ON */
4054         adapter->be_get_temp_freq = 64;
4055
4056         level = be_get_fw_log_level(adapter);
4057         adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4058
4059         return 0;
4060 }
4061
4062 static int lancer_recover_func(struct be_adapter *adapter)
4063 {
4064         struct device *dev = &adapter->pdev->dev;
4065         int status;
4066
4067         status = lancer_test_and_set_rdy_state(adapter);
4068         if (status)
4069                 goto err;
4070
4071         if (netif_running(adapter->netdev))
4072                 be_close(adapter->netdev);
4073
4074         be_clear(adapter);
4075
4076         be_clear_all_error(adapter);
4077
4078         status = be_setup(adapter);
4079         if (status)
4080                 goto err;
4081
4082         if (netif_running(adapter->netdev)) {
4083                 status = be_open(adapter->netdev);
4084                 if (status)
4085                         goto err;
4086         }
4087
4088         dev_err(dev, "Error recovery successful\n");
4089         return 0;
4090 err:
4091         if (status == -EAGAIN)
4092                 dev_err(dev, "Waiting for resource provisioning\n");
4093         else
4094                 dev_err(dev, "Error recovery failed\n");
4095
4096         return status;
4097 }
4098
4099 static void be_func_recovery_task(struct work_struct *work)
4100 {
4101         struct be_adapter *adapter =
4102                 container_of(work, struct be_adapter,  func_recovery_work.work);
4103         int status = 0;
4104
4105         be_detect_error(adapter);
4106
4107         if (adapter->hw_error && lancer_chip(adapter)) {
4108
4109                 rtnl_lock();
4110                 netif_device_detach(adapter->netdev);
4111                 rtnl_unlock();
4112
4113                 status = lancer_recover_func(adapter);
4114                 if (!status)
4115                         netif_device_attach(adapter->netdev);
4116         }
4117
4118         /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4119          * no need to attempt further recovery.
4120          */
4121         if (!status || status == -EAGAIN)
4122                 schedule_delayed_work(&adapter->func_recovery_work,
4123                                       msecs_to_jiffies(1000));
4124 }
4125
4126 static void be_worker(struct work_struct *work)
4127 {
4128         struct be_adapter *adapter =
4129                 container_of(work, struct be_adapter, work.work);
4130         struct be_rx_obj *rxo;
4131         struct be_eq_obj *eqo;
4132         int i;
4133
4134         /* when interrupts are not yet enabled, just reap any pending
4135         * mcc completions */
4136         if (!netif_running(adapter->netdev)) {
4137                 local_bh_disable();
4138                 be_process_mcc(adapter);
4139                 local_bh_enable();
4140                 goto reschedule;
4141         }
4142
4143         if (!adapter->stats_cmd_sent) {
4144                 if (lancer_chip(adapter))
4145                         lancer_cmd_get_pport_stats(adapter,
4146                                                 &adapter->stats_cmd);
4147                 else
4148                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
4149         }
4150
4151         if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4152                 be_cmd_get_die_temperature(adapter);
4153
4154         for_all_rx_queues(adapter, rxo, i) {
4155                 if (rxo->rx_post_starved) {
4156                         rxo->rx_post_starved = false;
4157                         be_post_rx_frags(rxo, GFP_KERNEL);
4158                 }
4159         }
4160
4161         for_all_evt_queues(adapter, eqo, i)
4162                 be_eqd_update(adapter, eqo);
4163
4164 reschedule:
4165         adapter->work_counter++;
4166         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4167 }
4168
4169 /* If any VFs are already enabled don't FLR the PF */
4170 static bool be_reset_required(struct be_adapter *adapter)
4171 {
4172         return pci_num_vf(adapter->pdev) ? false : true;
4173 }
4174
4175 static char *mc_name(struct be_adapter *adapter)
4176 {
4177         if (adapter->function_mode & FLEX10_MODE)
4178                 return "FLEX10";
4179         else if (adapter->function_mode & VNIC_MODE)
4180                 return "vNIC";
4181         else if (adapter->function_mode & UMC_ENABLED)
4182                 return "UMC";
4183         else
4184                 return "";
4185 }
4186
4187 static inline char *func_name(struct be_adapter *adapter)
4188 {
4189         return be_physfn(adapter) ? "PF" : "VF";
4190 }
4191
4192 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4193 {
4194         int status = 0;
4195         struct be_adapter *adapter;
4196         struct net_device *netdev;
4197         char port_name;
4198
4199         status = pci_enable_device(pdev);
4200         if (status)
4201                 goto do_none;
4202
4203         status = pci_request_regions(pdev, DRV_NAME);
4204         if (status)
4205                 goto disable_dev;
4206         pci_set_master(pdev);
4207
4208         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4209         if (netdev == NULL) {
4210                 status = -ENOMEM;
4211                 goto rel_reg;
4212         }
4213         adapter = netdev_priv(netdev);
4214         adapter->pdev = pdev;
4215         pci_set_drvdata(pdev, adapter);
4216         adapter->netdev = netdev;
4217         SET_NETDEV_DEV(netdev, &pdev->dev);
4218
4219         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4220         if (!status) {
4221                 status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4222                 if (status < 0) {
4223                         dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
4224                         goto free_netdev;
4225                 }
4226                 netdev->features |= NETIF_F_HIGHDMA;
4227         } else {
4228                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4229                 if (!status)
4230                         status = dma_set_coherent_mask(&pdev->dev,
4231                                                        DMA_BIT_MASK(32));
4232                 if (status) {
4233                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4234                         goto free_netdev;
4235                 }
4236         }
4237
4238         status = pci_enable_pcie_error_reporting(pdev);
4239         if (status)
4240                 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
4241
4242         status = be_ctrl_init(adapter);
4243         if (status)
4244                 goto free_netdev;
4245
4246         /* sync up with fw's ready state */
4247         if (be_physfn(adapter)) {
4248                 status = be_fw_wait_ready(adapter);
4249                 if (status)
4250                         goto ctrl_clean;
4251         }
4252
4253         if (be_reset_required(adapter)) {
4254                 status = be_cmd_reset_function(adapter);
4255                 if (status)
4256                         goto ctrl_clean;
4257
4258                 /* Wait for interrupts to quiesce after an FLR */
4259                 msleep(100);
4260         }
4261
4262         /* Allow interrupts for other ULPs running on NIC function */
4263         be_intr_set(adapter, true);
4264
4265         /* tell fw we're ready to fire cmds */
4266         status = be_cmd_fw_init(adapter);
4267         if (status)
4268                 goto ctrl_clean;
4269
4270         status = be_stats_init(adapter);
4271         if (status)
4272                 goto ctrl_clean;
4273
4274         status = be_get_initial_config(adapter);
4275         if (status)
4276                 goto stats_clean;
4277
4278         INIT_DELAYED_WORK(&adapter->work, be_worker);
4279         INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4280         adapter->rx_fc = adapter->tx_fc = true;
4281
4282         status = be_setup(adapter);
4283         if (status)
4284                 goto stats_clean;
4285
4286         be_netdev_init(netdev);
4287         status = register_netdev(netdev);
4288         if (status != 0)
4289                 goto unsetup;
4290
4291         be_roce_dev_add(adapter);
4292
4293         schedule_delayed_work(&adapter->func_recovery_work,
4294                               msecs_to_jiffies(1000));
4295
4296         be_cmd_query_port_name(adapter, &port_name);
4297
4298         dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4299                  func_name(adapter), mc_name(adapter), port_name);
4300
4301         return 0;
4302
4303 unsetup:
4304         be_clear(adapter);
4305 stats_clean:
4306         be_stats_cleanup(adapter);
4307 ctrl_clean:
4308         be_ctrl_cleanup(adapter);
4309 free_netdev:
4310         free_netdev(netdev);
4311         pci_set_drvdata(pdev, NULL);
4312 rel_reg:
4313         pci_release_regions(pdev);
4314 disable_dev:
4315         pci_disable_device(pdev);
4316 do_none:
4317         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4318         return status;
4319 }
4320
4321 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4322 {
4323         struct be_adapter *adapter = pci_get_drvdata(pdev);
4324         struct net_device *netdev =  adapter->netdev;
4325
4326         if (adapter->wol)
4327                 be_setup_wol(adapter, true);
4328
4329         cancel_delayed_work_sync(&adapter->func_recovery_work);
4330
4331         netif_device_detach(netdev);
4332         if (netif_running(netdev)) {
4333                 rtnl_lock();
4334                 be_close(netdev);
4335                 rtnl_unlock();
4336         }
4337         be_clear(adapter);
4338
4339         pci_save_state(pdev);
4340         pci_disable_device(pdev);
4341         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4342         return 0;
4343 }
4344
4345 static int be_resume(struct pci_dev *pdev)
4346 {
4347         int status = 0;
4348         struct be_adapter *adapter = pci_get_drvdata(pdev);
4349         struct net_device *netdev =  adapter->netdev;
4350
4351         netif_device_detach(netdev);
4352
4353         status = pci_enable_device(pdev);
4354         if (status)
4355                 return status;
4356
4357         pci_set_power_state(pdev, PCI_D0);
4358         pci_restore_state(pdev);
4359
4360         /* tell fw we're ready to fire cmds */
4361         status = be_cmd_fw_init(adapter);
4362         if (status)
4363                 return status;
4364
4365         be_setup(adapter);
4366         if (netif_running(netdev)) {
4367                 rtnl_lock();
4368                 be_open(netdev);
4369                 rtnl_unlock();
4370         }
4371
4372         schedule_delayed_work(&adapter->func_recovery_work,
4373                               msecs_to_jiffies(1000));
4374         netif_device_attach(netdev);
4375
4376         if (adapter->wol)
4377                 be_setup_wol(adapter, false);
4378
4379         return 0;
4380 }
4381
4382 /*
4383  * An FLR will stop BE from DMAing any data.
4384  */
4385 static void be_shutdown(struct pci_dev *pdev)
4386 {
4387         struct be_adapter *adapter = pci_get_drvdata(pdev);
4388
4389         if (!adapter)
4390                 return;
4391
4392         cancel_delayed_work_sync(&adapter->work);
4393         cancel_delayed_work_sync(&adapter->func_recovery_work);
4394
4395         netif_device_detach(adapter->netdev);
4396
4397         be_cmd_reset_function(adapter);
4398
4399         pci_disable_device(pdev);
4400 }
4401
4402 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4403                                 pci_channel_state_t state)
4404 {
4405         struct be_adapter *adapter = pci_get_drvdata(pdev);
4406         struct net_device *netdev =  adapter->netdev;
4407
4408         dev_err(&adapter->pdev->dev, "EEH error detected\n");
4409
4410         if (!adapter->eeh_error) {
4411                 adapter->eeh_error = true;
4412
4413                 cancel_delayed_work_sync(&adapter->func_recovery_work);
4414
4415                 rtnl_lock();
4416                 netif_device_detach(netdev);
4417                 if (netif_running(netdev))
4418                         be_close(netdev);
4419                 rtnl_unlock();
4420
4421                 be_clear(adapter);
4422         }
4423
4424         if (state == pci_channel_io_perm_failure)
4425                 return PCI_ERS_RESULT_DISCONNECT;
4426
4427         pci_disable_device(pdev);
4428
4429         /* The error could cause the FW to trigger a flash debug dump.
4430          * Resetting the card while flash dump is in progress
4431          * can cause it not to recover; wait for it to finish.
4432          * Wait only for first function as it is needed only once per
4433          * adapter.
4434          */
4435         if (pdev->devfn == 0)
4436                 ssleep(30);
4437
4438         return PCI_ERS_RESULT_NEED_RESET;
4439 }
4440
4441 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4442 {
4443         struct be_adapter *adapter = pci_get_drvdata(pdev);
4444         int status;
4445
4446         dev_info(&adapter->pdev->dev, "EEH reset\n");
4447
4448         status = pci_enable_device(pdev);
4449         if (status)
4450                 return PCI_ERS_RESULT_DISCONNECT;
4451
4452         pci_set_master(pdev);
4453         pci_set_power_state(pdev, PCI_D0);
4454         pci_restore_state(pdev);
4455
4456         /* Check if card is ok and fw is ready */
4457         dev_info(&adapter->pdev->dev,
4458                  "Waiting for FW to be ready after EEH reset\n");
4459         status = be_fw_wait_ready(adapter);
4460         if (status)
4461                 return PCI_ERS_RESULT_DISCONNECT;
4462
4463         pci_cleanup_aer_uncorrect_error_status(pdev);
4464         be_clear_all_error(adapter);
4465         return PCI_ERS_RESULT_RECOVERED;
4466 }
4467
4468 static void be_eeh_resume(struct pci_dev *pdev)
4469 {
4470         int status = 0;
4471         struct be_adapter *adapter = pci_get_drvdata(pdev);
4472         struct net_device *netdev =  adapter->netdev;
4473
4474         dev_info(&adapter->pdev->dev, "EEH resume\n");
4475
4476         pci_save_state(pdev);
4477
4478         status = be_cmd_reset_function(adapter);
4479         if (status)
4480                 goto err;
4481
4482         /* tell fw we're ready to fire cmds */
4483         status = be_cmd_fw_init(adapter);
4484         if (status)
4485                 goto err;
4486
4487         status = be_setup(adapter);
4488         if (status)
4489                 goto err;
4490
4491         if (netif_running(netdev)) {
4492                 status = be_open(netdev);
4493                 if (status)
4494                         goto err;
4495         }
4496
4497         schedule_delayed_work(&adapter->func_recovery_work,
4498                               msecs_to_jiffies(1000));
4499         netif_device_attach(netdev);
4500         return;
4501 err:
4502         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4503 }
4504
4505 static const struct pci_error_handlers be_eeh_handlers = {
4506         .error_detected = be_eeh_err_detected,
4507         .slot_reset = be_eeh_reset,
4508         .resume = be_eeh_resume,
4509 };
4510
4511 static struct pci_driver be_driver = {
4512         .name = DRV_NAME,
4513         .id_table = be_dev_ids,
4514         .probe = be_probe,
4515         .remove = be_remove,
4516         .suspend = be_suspend,
4517         .resume = be_resume,
4518         .shutdown = be_shutdown,
4519         .err_handler = &be_eeh_handlers
4520 };
4521
4522 static int __init be_init_module(void)
4523 {
4524         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4525             rx_frag_size != 2048) {
4526                 printk(KERN_WARNING DRV_NAME
4527                         " : Module param rx_frag_size must be 2048/4096/8192."
4528                         " Using 2048\n");
4529                 rx_frag_size = 2048;
4530         }
4531
4532         return pci_register_driver(&be_driver);
4533 }
4534 module_init(be_init_module);
4535
4536 static void __exit be_exit_module(void)
4537 {
4538         pci_unregister_driver(&be_driver);
4539 }
4540 module_exit(be_exit_module);