95c331abee438338244a6345322c2608e226ec4a
[firefly-linux-kernel-4.4.55.git] / drivers / net / ethernet / intel / i40e / i40e_main.c
1 /*******************************************************************************
2  *
3  * Intel Ethernet Controller XL710 Family Linux Driver
4  * Copyright(c) 2013 - 2014 Intel Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along
16  * with this program.  If not, see <http://www.gnu.org/licenses/>.
17  *
18  * The full GNU General Public License is included in this distribution in
19  * the file called "COPYING".
20  *
21  * Contact Information:
22  * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24  *
25  ******************************************************************************/
26
27 /* Local includes */
28 #include "i40e.h"
29 #include "i40e_diag.h"
30 #ifdef CONFIG_I40E_VXLAN
31 #include <net/vxlan.h>
32 #endif
33
34 const char i40e_driver_name[] = "i40e";
35 static const char i40e_driver_string[] =
36                         "Intel(R) Ethernet Connection XL710 Network Driver";
37
38 #define DRV_KERN "-k"
39
40 #define DRV_VERSION_MAJOR 0
41 #define DRV_VERSION_MINOR 4
42 #define DRV_VERSION_BUILD 10
43 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
44              __stringify(DRV_VERSION_MINOR) "." \
45              __stringify(DRV_VERSION_BUILD)    DRV_KERN
46 const char i40e_driver_version_str[] = DRV_VERSION;
47 static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
48
49 /* a bit of forward declarations */
50 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
51 static void i40e_handle_reset_warning(struct i40e_pf *pf);
52 static int i40e_add_vsi(struct i40e_vsi *vsi);
53 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
54 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
55 static int i40e_setup_misc_vector(struct i40e_pf *pf);
56 static void i40e_determine_queue_usage(struct i40e_pf *pf);
57 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
58 static void i40e_fdir_sb_setup(struct i40e_pf *pf);
59 static int i40e_veb_get_bw_info(struct i40e_veb *veb);
60
61 /* i40e_pci_tbl - PCI Device ID Table
62  *
63  * Last entry must be all 0s
64  *
65  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
66  *   Class, Class Mask, private data (not used) }
67  */
68 static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = {
69         {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
70         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
71         {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
72         {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
73         {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
74         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
75         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
76         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
77         /* required last entry */
78         {0, }
79 };
80 MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
81
/* upper bound on SR-IOV virtual functions — presumably a device limit;
 * confirm against the VF allocation code
 */
#define I40E_MAX_VF_COUNT 128

/* -1 selects the driver's default message level; perm 0 means the
 * parameter is load-time only (no sysfs entry)
 */
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
91
92 /**
93  * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
94  * @hw:   pointer to the HW structure
95  * @mem:  ptr to mem struct to fill out
96  * @size: size of memory requested
97  * @alignment: what to align the allocation to
98  **/
99 int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
100                             u64 size, u32 alignment)
101 {
102         struct i40e_pf *pf = (struct i40e_pf *)hw->back;
103
104         mem->size = ALIGN(size, alignment);
105         mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
106                                       &mem->pa, GFP_KERNEL);
107         if (!mem->va)
108                 return -ENOMEM;
109
110         return 0;
111 }
112
113 /**
114  * i40e_free_dma_mem_d - OS specific memory free for shared code
115  * @hw:   pointer to the HW structure
116  * @mem:  ptr to mem struct to free
117  **/
118 int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
119 {
120         struct i40e_pf *pf = (struct i40e_pf *)hw->back;
121
122         dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
123         mem->va = NULL;
124         mem->pa = 0;
125         mem->size = 0;
126
127         return 0;
128 }
129
130 /**
131  * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
132  * @hw:   pointer to the HW structure
133  * @mem:  ptr to mem struct to fill out
134  * @size: size of memory requested
135  **/
136 int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
137                              u32 size)
138 {
139         mem->size = size;
140         mem->va = kzalloc(size, GFP_KERNEL);
141
142         if (!mem->va)
143                 return -ENOMEM;
144
145         return 0;
146 }
147
148 /**
149  * i40e_free_virt_mem_d - OS specific memory free for shared code
150  * @hw:   pointer to the HW structure
151  * @mem:  ptr to mem struct to free
152  **/
153 int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
154 {
155         /* it's ok to kfree a NULL pointer */
156         kfree(mem->va);
157         mem->va = NULL;
158         mem->size = 0;
159
160         return 0;
161 }
162
/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 * (-EINVAL for bad parameters, -ENOMEM if no contiguous run was found).
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	/* an id with the valid bit set would be indistinguishable from
	 * an allocated entry, so reject such ids up front
	 */
	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%p needed=%d id=0x%04x\n",
			 pile, needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			/* next search starts just past this lump */
			pile->search_hint = i + j;
			break;
		} else {
			/* not enough, so skip over it and continue looking */
			i += j;
		}
	}

	return ret;
}
219
220 /**
221  * i40e_put_lump - return a lump of generic resource
222  * @pile: the pile of resource to search
223  * @index: the base item index
224  * @id: the owner id of the items assigned
225  *
226  * Returns the count of items in the lump
227  **/
228 static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
229 {
230         int valid_id = (id | I40E_PILE_VALID_BIT);
231         int count = 0;
232         int i;
233
234         if (!pile || index >= pile->num_entries)
235                 return -EINVAL;
236
237         for (i = index;
238              i < pile->num_entries && pile->list[i] == valid_id;
239              i++) {
240                 pile->list[i] = 0;
241                 count++;
242         }
243
244         if (count && index < pile->search_hint)
245                 pile->search_hint = index;
246
247         return count;
248 }
249
250 /**
251  * i40e_service_event_schedule - Schedule the service task to wake up
252  * @pf: board private structure
253  *
254  * If not already scheduled, this puts the task into the work queue
255  **/
256 static void i40e_service_event_schedule(struct i40e_pf *pf)
257 {
258         if (!test_bit(__I40E_DOWN, &pf->state) &&
259             !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
260             !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
261                 schedule_work(&pf->service_task);
262 }
263
264 /**
265  * i40e_tx_timeout - Respond to a Tx Hang
266  * @netdev: network interface device structure
267  *
268  * If any port has noticed a Tx timeout, it is likely that the whole
269  * device is munged, not just the one netdev port, so go for the full
270  * reset.
271  **/
272 static void i40e_tx_timeout(struct net_device *netdev)
273 {
274         struct i40e_netdev_priv *np = netdev_priv(netdev);
275         struct i40e_vsi *vsi = np->vsi;
276         struct i40e_pf *pf = vsi->back;
277
278         pf->tx_timeout_count++;
279
280         if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
281                 pf->tx_timeout_recovery_level = 0;
282         pf->tx_timeout_last_recovery = jiffies;
283         netdev_info(netdev, "tx_timeout recovery level %d\n",
284                     pf->tx_timeout_recovery_level);
285
286         switch (pf->tx_timeout_recovery_level) {
287         case 0:
288                 /* disable and re-enable queues for the VSI */
289                 if (in_interrupt()) {
290                         set_bit(__I40E_REINIT_REQUESTED, &pf->state);
291                         set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
292                 } else {
293                         i40e_vsi_reinit_locked(vsi);
294                 }
295                 break;
296         case 1:
297                 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
298                 break;
299         case 2:
300                 set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
301                 break;
302         case 3:
303                 set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
304                 break;
305         default:
306                 netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
307                 set_bit(__I40E_DOWN, &vsi->state);
308                 i40e_down(vsi);
309                 break;
310         }
311         i40e_service_event_schedule(pf);
312         pf->tx_timeout_recovery_level++;
313 }
314
/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	/* the barrier must stay before this doorbell write */
	writel(val, rx_ring->tail);
}
332
/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	/* stats live inside the VSI itself; no locking here */
	return &vsi->net_stats;
}
344
/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: caller-provided buffer; per-queue totals are accumulated into it
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	/* nothing sensible to add while the VSI is down or before the
	 * ring array has been allocated
	 */
	if (test_bit(__I40E_DOWN, &vsi->state))
		return stats;

	if (!vsi->tx_rings)
		return stats;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		/* rings may be freed concurrently; snapshot under RCU */
		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;

		/* u64_stats retry loop yields a consistent 64-bit
		 * snapshot even on 32-bit architectures
		 */
		do {
			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
			packets = tx_ring->stats.packets;
			bytes   = tx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes   += bytes;
		/* Rx ring is allocated in the same block, directly
		 * after its Tx twin, hence the pointer arithmetic
		 */
		rx_ring = &tx_ring[1];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes   = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes   += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast	= vsi_stats->multicast;
	stats->tx_errors	= vsi_stats->tx_errors;
	stats->tx_dropped	= vsi_stats->tx_dropped;
	stats->rx_errors	= vsi_stats->rx_errors;
	stats->rx_crc_errors	= vsi_stats->rx_crc_errors;
	stats->rx_length_errors = vsi_stats->rx_length_errors;

	return stats;
}
408
409 /**
410  * i40e_vsi_reset_stats - Resets all stats of the given vsi
411  * @vsi: the VSI to have its stats reset
412  **/
413 void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
414 {
415         struct rtnl_link_stats64 *ns;
416         int i;
417
418         if (!vsi)
419                 return;
420
421         ns = i40e_get_vsi_stats_struct(vsi);
422         memset(ns, 0, sizeof(*ns));
423         memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
424         memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
425         memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
426         if (vsi->rx_rings && vsi->rx_rings[0]) {
427                 for (i = 0; i < vsi->num_queue_pairs; i++) {
428                         memset(&vsi->rx_rings[i]->stats, 0 ,
429                                sizeof(vsi->rx_rings[i]->stats));
430                         memset(&vsi->rx_rings[i]->rx_stats, 0 ,
431                                sizeof(vsi->rx_rings[i]->rx_stats));
432                         memset(&vsi->tx_rings[i]->stats, 0 ,
433                                sizeof(vsi->tx_rings[i]->stats));
434                         memset(&vsi->tx_rings[i]->tx_stats, 0,
435                                sizeof(vsi->tx_rings[i]->tx_stats));
436                 }
437         }
438         vsi->stat_offsets_loaded = false;
439 }
440
/**
 * i40e_pf_reset_stats - Reset all of the stats for the given pf
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	/* force fresh offsets to be captured on the next hw read */
	pf->stat_offsets_loaded = false;
}
451
/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	/* on the QEMU device id the counter is assembled from two 32-bit
	 * reads instead of one 64-bit read — presumably the emulated
	 * device lacks 64-bit register access; confirm against QEMU
	 */
	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		/* counter wrapped: add one full 48-bit period */
		*stat = (new_data + ((u64)1 << 48)) - *offset;
	/* keep the result within the 48-bit counter width */
	*stat &= 0xFFFFFFFFFFFFULL;
}
486
487 /**
488  * i40e_stat_update32 - read and update a 32 bit stat from the chip
489  * @hw: ptr to the hardware info
490  * @reg: the hw reg to read
491  * @offset_loaded: has the initial offset been loaded yet
492  * @offset: ptr to current offset value
493  * @stat: ptr to the stat
494  **/
495 static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
496                                bool offset_loaded, u64 *offset, u64 *stat)
497 {
498         u32 new_data;
499
500         new_data = rd32(hw, reg);
501         if (!offset_loaded)
502                 *offset = new_data;
503         if (likely(new_data >= *offset))
504                 *stat = (u32)(new_data - *offset);
505         else
506                 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
507 }
508
509 /**
510  * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
511  * @vsi: the VSI to be updated
512  **/
513 void i40e_update_eth_stats(struct i40e_vsi *vsi)
514 {
515         int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
516         struct i40e_pf *pf = vsi->back;
517         struct i40e_hw *hw = &pf->hw;
518         struct i40e_eth_stats *oes;
519         struct i40e_eth_stats *es;     /* device's eth stats */
520
521         es = &vsi->eth_stats;
522         oes = &vsi->eth_stats_offsets;
523
524         /* Gather up the stats that the hw collects */
525         i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
526                            vsi->stat_offsets_loaded,
527                            &oes->tx_errors, &es->tx_errors);
528         i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
529                            vsi->stat_offsets_loaded,
530                            &oes->rx_discards, &es->rx_discards);
531         i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
532                            vsi->stat_offsets_loaded,
533                            &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
534         i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
535                            vsi->stat_offsets_loaded,
536                            &oes->tx_errors, &es->tx_errors);
537
538         i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
539                            I40E_GLV_GORCL(stat_idx),
540                            vsi->stat_offsets_loaded,
541                            &oes->rx_bytes, &es->rx_bytes);
542         i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
543                            I40E_GLV_UPRCL(stat_idx),
544                            vsi->stat_offsets_loaded,
545                            &oes->rx_unicast, &es->rx_unicast);
546         i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
547                            I40E_GLV_MPRCL(stat_idx),
548                            vsi->stat_offsets_loaded,
549                            &oes->rx_multicast, &es->rx_multicast);
550         i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
551                            I40E_GLV_BPRCL(stat_idx),
552                            vsi->stat_offsets_loaded,
553                            &oes->rx_broadcast, &es->rx_broadcast);
554
555         i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
556                            I40E_GLV_GOTCL(stat_idx),
557                            vsi->stat_offsets_loaded,
558                            &oes->tx_bytes, &es->tx_bytes);
559         i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
560                            I40E_GLV_UPTCL(stat_idx),
561                            vsi->stat_offsets_loaded,
562                            &oes->tx_unicast, &es->tx_unicast);
563         i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
564                            I40E_GLV_MPTCL(stat_idx),
565                            vsi->stat_offsets_loaded,
566                            &oes->tx_multicast, &es->tx_multicast);
567         i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
568                            I40E_GLV_BPTCL(stat_idx),
569                            vsi->stat_offsets_loaded,
570                            &oes->tx_broadcast, &es->tx_broadcast);
571         vsi->stat_offsets_loaded = true;
572 }
573
574 /**
575  * i40e_update_veb_stats - Update Switch component statistics
576  * @veb: the VEB being updated
577  **/
578 static void i40e_update_veb_stats(struct i40e_veb *veb)
579 {
580         struct i40e_pf *pf = veb->pf;
581         struct i40e_hw *hw = &pf->hw;
582         struct i40e_eth_stats *oes;
583         struct i40e_eth_stats *es;     /* device's eth stats */
584         int idx = 0;
585
586         idx = veb->stats_idx;
587         es = &veb->stats;
588         oes = &veb->stats_offsets;
589
590         /* Gather up the stats that the hw collects */
591         i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
592                            veb->stat_offsets_loaded,
593                            &oes->tx_discards, &es->tx_discards);
594         if (hw->revision_id > 0)
595                 i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
596                                    veb->stat_offsets_loaded,
597                                    &oes->rx_unknown_protocol,
598                                    &es->rx_unknown_protocol);
599         i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
600                            veb->stat_offsets_loaded,
601                            &oes->rx_bytes, &es->rx_bytes);
602         i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
603                            veb->stat_offsets_loaded,
604                            &oes->rx_unicast, &es->rx_unicast);
605         i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
606                            veb->stat_offsets_loaded,
607                            &oes->rx_multicast, &es->rx_multicast);
608         i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
609                            veb->stat_offsets_loaded,
610                            &oes->rx_broadcast, &es->rx_broadcast);
611
612         i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
613                            veb->stat_offsets_loaded,
614                            &oes->tx_bytes, &es->tx_bytes);
615         i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
616                            veb->stat_offsets_loaded,
617                            &oes->tx_unicast, &es->tx_unicast);
618         i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
619                            veb->stat_offsets_loaded,
620                            &oes->tx_multicast, &es->tx_multicast);
621         i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
622                            veb->stat_offsets_loaded,
623                            &oes->tx_broadcast, &es->tx_broadcast);
624         veb->stat_offsets_loaded = true;
625 }
626
627 /**
628  * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
629  * @pf: the corresponding PF
630  *
631  * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
632  **/
633 static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
634 {
635         struct i40e_hw_port_stats *osd = &pf->stats_offsets;
636         struct i40e_hw_port_stats *nsd = &pf->stats;
637         struct i40e_hw *hw = &pf->hw;
638         u64 xoff = 0;
639         u16 i, v;
640
641         if ((hw->fc.current_mode != I40E_FC_FULL) &&
642             (hw->fc.current_mode != I40E_FC_RX_PAUSE))
643                 return;
644
645         xoff = nsd->link_xoff_rx;
646         i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
647                            pf->stat_offsets_loaded,
648                            &osd->link_xoff_rx, &nsd->link_xoff_rx);
649
650         /* No new LFC xoff rx */
651         if (!(nsd->link_xoff_rx - xoff))
652                 return;
653
654         /* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
655         for (v = 0; v < pf->num_alloc_vsi; v++) {
656                 struct i40e_vsi *vsi = pf->vsi[v];
657
658                 if (!vsi || !vsi->tx_rings[0])
659                         continue;
660
661                 for (i = 0; i < vsi->num_queue_pairs; i++) {
662                         struct i40e_ring *ring = vsi->tx_rings[i];
663                         clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
664                 }
665         }
666 }
667
/**
 * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in PFC mode.  Rings whose
 * traffic class saw new PFC pause frames get their Tx hang detection
 * disarmed, since a paused TC can legitimately stall transmit.
 **/
static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
	struct i40e_dcbx_config *dcb_cfg;
	struct i40e_hw *hw = &pf->hw;
	u16 i, v;
	u8 tc;

	dcb_cfg = &hw->local_dcbx_config;

	/* See if DCB enabled with PFC TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
	    !(dcb_cfg->pfc.pfcenable)) {
		/* no PFC: fall back to the link-level pause counter */
		i40e_update_link_xoff_rx(pf);
		return;
	}

	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		u64 prio_xoff = nsd->priority_xoff_rx[i];
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);

		/* No new PFC xoff rx */
		if (!(nsd->priority_xoff_rx[i] - prio_xoff))
			continue;
		/* Get the TC for given priority */
		tc = dcb_cfg->etscfg.prioritytable[i];
		/* remember that this TC received a fresh pause */
		xoff[tc] = true;
	}

	/* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];

		if (!vsi || !vsi->tx_rings[0])
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *ring = vsi->tx_rings[i];

			tc = ring->dcb_tc;
			if (xoff[tc])
				clear_bit(__I40E_HANG_CHECK_ARMED,
					  &ring->state);
		}
	}
}
725
/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u32 tx_restart, tx_busy;
	u32 rx_page, rx_buf;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	/* skip while the VSI is down or a reconfiguration is in flight */
	if (test_bit(__I40E_DOWN, &vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		struct i40e_ring *p;
		u64 bytes, packets;
		unsigned int start;

		/* locate Tx ring */
		p = ACCESS_ONCE(vsi->tx_rings[q]);

		/* u64_stats retry loop yields a consistent snapshot
		 * even on 32-bit architectures
		 */
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;

		/* Rx queue is part of the same block as Tx queue */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}
826
/**
 * i40e_update_pf_stats - Update the pf statistics counters.
 * @pf: the PF to be updated
 *
 * Snapshots the port-level hardware statistics into pf->stats.  Each
 * i40e_stat_update32/48 call reads a free-running hardware counter
 * (the 48-bit ones are split across an H/L register pair) and accounts
 * for the saved offset in pf->stats_offsets; on the very first pass
 * (stat_offsets_loaded == false) the reads only establish the baseline.
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	/* port byte and discard counters */
	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_discards,
			   &nsd->eth.tx_discards);

	/* unicast/multicast/broadcast packet counters, both directions */
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	/* link-level error counters */
	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	/* link flow control (XON/XOFF) frame counters */
	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_update_prio_xoff_rx(pf);  /* handles I40E_GLPRT_LXOFFRXC */
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	/* per-priority (PFC) flow control counters, 8 priorities */
	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	/* Rx packet size histogram */
	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	/* Tx packet size histogram */
	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	/* Rx malformed-frame counters */
	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	/* FDIR stats */
	i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx),
			   pf->stat_offsets_loaded,
			   &osd->fd_atr_match, &nsd->fd_atr_match);
	i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx),
			   pf->stat_offsets_loaded,
			   &osd->fd_sb_match, &nsd->fd_sb_match);

	/* EEE LPI: current status bits plus cumulative entry counters */
	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	/* baselines are valid from now on */
	pf->stat_offsets_loaded = true;
}
1040
1041 /**
1042  * i40e_update_stats - Update the various statistics counters.
1043  * @vsi: the VSI to be updated
1044  *
1045  * Update the various stats for this VSI and its related entities.
1046  **/
1047 void i40e_update_stats(struct i40e_vsi *vsi)
1048 {
1049         struct i40e_pf *pf = vsi->back;
1050
1051         if (vsi == pf->vsi[pf->lan_vsi])
1052                 i40e_update_pf_stats(pf);
1053
1054         i40e_update_vsi_stats(vsi);
1055 }
1056
1057 /**
1058  * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
1059  * @vsi: the VSI to be searched
1060  * @macaddr: the MAC address
1061  * @vlan: the vlan
1062  * @is_vf: make sure its a vf filter, else doesn't matter
1063  * @is_netdev: make sure its a netdev filter, else doesn't matter
1064  *
1065  * Returns ptr to the filter object or NULL
1066  **/
1067 static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
1068                                                 u8 *macaddr, s16 vlan,
1069                                                 bool is_vf, bool is_netdev)
1070 {
1071         struct i40e_mac_filter *f;
1072
1073         if (!vsi || !macaddr)
1074                 return NULL;
1075
1076         list_for_each_entry(f, &vsi->mac_filter_list, list) {
1077                 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1078                     (vlan == f->vlan)    &&
1079                     (!is_vf || f->is_vf) &&
1080                     (!is_netdev || f->is_netdev))
1081                         return f;
1082         }
1083         return NULL;
1084 }
1085
1086 /**
1087  * i40e_find_mac - Find a mac addr in the macvlan filters list
1088  * @vsi: the VSI to be searched
1089  * @macaddr: the MAC address we are searching for
1090  * @is_vf: make sure its a vf filter, else doesn't matter
1091  * @is_netdev: make sure its a netdev filter, else doesn't matter
1092  *
1093  * Returns the first filter with the provided MAC address or NULL if
1094  * MAC address was not found
1095  **/
1096 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
1097                                       bool is_vf, bool is_netdev)
1098 {
1099         struct i40e_mac_filter *f;
1100
1101         if (!vsi || !macaddr)
1102                 return NULL;
1103
1104         list_for_each_entry(f, &vsi->mac_filter_list, list) {
1105                 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1106                     (!is_vf || f->is_vf) &&
1107                     (!is_netdev || f->is_netdev))
1108                         return f;
1109         }
1110         return NULL;
1111 }
1112
1113 /**
1114  * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
1115  * @vsi: the VSI to be searched
1116  *
1117  * Returns true if VSI is in vlan mode or false otherwise
1118  **/
1119 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
1120 {
1121         struct i40e_mac_filter *f;
1122
1123         /* Only -1 for all the filters denotes not in vlan mode
1124          * so we have to go through all the list in order to make sure
1125          */
1126         list_for_each_entry(f, &vsi->mac_filter_list, list) {
1127                 if (f->vlan >= 0)
1128                         return true;
1129         }
1130
1131         return false;
1132 }
1133
1134 /**
1135  * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
1136  * @vsi: the VSI to be searched
1137  * @macaddr: the mac address to be filtered
1138  * @is_vf: true if it is a vf
1139  * @is_netdev: true if it is a netdev
1140  *
1141  * Goes through all the macvlan filters and adds a
1142  * macvlan filter for each unique vlan that already exists
1143  *
1144  * Returns first filter found on success, else NULL
1145  **/
1146 struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
1147                                              bool is_vf, bool is_netdev)
1148 {
1149         struct i40e_mac_filter *f;
1150
1151         list_for_each_entry(f, &vsi->mac_filter_list, list) {
1152                 if (!i40e_find_filter(vsi, macaddr, f->vlan,
1153                                       is_vf, is_netdev)) {
1154                         if (!i40e_add_filter(vsi, macaddr, f->vlan,
1155                                              is_vf, is_netdev))
1156                                 return NULL;
1157                 }
1158         }
1159
1160         return list_first_entry_or_null(&vsi->mac_filter_list,
1161                                         struct i40e_mac_filter, list);
1162 }
1163
1164 /**
1165  * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
1166  * @vsi: the PF Main VSI - inappropriate for any other VSI
1167  * @macaddr: the MAC address
1168  **/
1169 static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
1170 {
1171         struct i40e_aqc_remove_macvlan_element_data element;
1172         struct i40e_pf *pf = vsi->back;
1173         i40e_status aq_ret;
1174
1175         /* Only appropriate for the PF main VSI */
1176         if (vsi->type != I40E_VSI_MAIN)
1177                 return;
1178
1179         ether_addr_copy(element.mac_addr, macaddr);
1180         element.vlan_tag = 0;
1181         element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1182                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1183         aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1184         if (aq_ret)
1185                 dev_err(&pf->pdev->dev, "Could not remove default MAC-VLAN\n");
1186 }
1187
1188 /**
1189  * i40e_add_filter - Add a mac/vlan filter to the VSI
1190  * @vsi: the VSI to be searched
1191  * @macaddr: the MAC address
1192  * @vlan: the vlan
1193  * @is_vf: make sure its a vf filter, else doesn't matter
1194  * @is_netdev: make sure its a netdev filter, else doesn't matter
1195  *
1196  * Returns ptr to the filter object or NULL when no memory available.
1197  **/
1198 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
1199                                         u8 *macaddr, s16 vlan,
1200                                         bool is_vf, bool is_netdev)
1201 {
1202         struct i40e_mac_filter *f;
1203
1204         if (!vsi || !macaddr)
1205                 return NULL;
1206
1207         f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
1208         if (!f) {
1209                 f = kzalloc(sizeof(*f), GFP_ATOMIC);
1210                 if (!f)
1211                         goto add_filter_out;
1212
1213                 ether_addr_copy(f->macaddr, macaddr);
1214                 f->vlan = vlan;
1215                 f->changed = true;
1216
1217                 INIT_LIST_HEAD(&f->list);
1218                 list_add(&f->list, &vsi->mac_filter_list);
1219         }
1220
1221         /* increment counter and add a new flag if needed */
1222         if (is_vf) {
1223                 if (!f->is_vf) {
1224                         f->is_vf = true;
1225                         f->counter++;
1226                 }
1227         } else if (is_netdev) {
1228                 if (!f->is_netdev) {
1229                         f->is_netdev = true;
1230                         f->counter++;
1231                 }
1232         } else {
1233                 f->counter++;
1234         }
1235
1236         /* changed tells sync_filters_subtask to
1237          * push the filter down to the firmware
1238          */
1239         if (f->changed) {
1240                 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1241                 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1242         }
1243
1244 add_filter_out:
1245         return f;
1246 }
1247
1248 /**
1249  * i40e_del_filter - Remove a mac/vlan filter from the VSI
1250  * @vsi: the VSI to be searched
1251  * @macaddr: the MAC address
1252  * @vlan: the vlan
1253  * @is_vf: make sure it's a vf filter, else doesn't matter
1254  * @is_netdev: make sure it's a netdev filter, else doesn't matter
1255  **/
1256 void i40e_del_filter(struct i40e_vsi *vsi,
1257                      u8 *macaddr, s16 vlan,
1258                      bool is_vf, bool is_netdev)
1259 {
1260         struct i40e_mac_filter *f;
1261
1262         if (!vsi || !macaddr)
1263                 return;
1264
1265         f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
1266         if (!f || f->counter == 0)
1267                 return;
1268
1269         if (is_vf) {
1270                 if (f->is_vf) {
1271                         f->is_vf = false;
1272                         f->counter--;
1273                 }
1274         } else if (is_netdev) {
1275                 if (f->is_netdev) {
1276                         f->is_netdev = false;
1277                         f->counter--;
1278                 }
1279         } else {
1280                 /* make sure we don't remove a filter in use by vf or netdev */
1281                 int min_f = 0;
1282                 min_f += (f->is_vf ? 1 : 0);
1283                 min_f += (f->is_netdev ? 1 : 0);
1284
1285                 if (f->counter > min_f)
1286                         f->counter--;
1287         }
1288
1289         /* counter == 0 tells sync_filters_subtask to
1290          * remove the filter from the firmware's list
1291          */
1292         if (f->counter == 0) {
1293                 f->changed = true;
1294                 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1295                 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1296         }
1297 }
1298
1299 /**
1300  * i40e_set_mac - NDO callback to set mac address
1301  * @netdev: network interface device structure
1302  * @p: pointer to an address structure
1303  *
1304  * Returns 0 on success, negative on failure
1305  **/
1306 static int i40e_set_mac(struct net_device *netdev, void *p)
1307 {
1308         struct i40e_netdev_priv *np = netdev_priv(netdev);
1309         struct i40e_vsi *vsi = np->vsi;
1310         struct sockaddr *addr = p;
1311         struct i40e_mac_filter *f;
1312
1313         if (!is_valid_ether_addr(addr->sa_data))
1314                 return -EADDRNOTAVAIL;
1315
1316         netdev_info(netdev, "set mac address=%pM\n", addr->sa_data);
1317
1318         if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
1319                 return 0;
1320
1321         if (test_bit(__I40E_DOWN, &vsi->back->state) ||
1322             test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
1323                 return -EADDRNOTAVAIL;
1324
1325         if (vsi->type == I40E_VSI_MAIN) {
1326                 i40e_status ret;
1327                 ret = i40e_aq_mac_address_write(&vsi->back->hw,
1328                                                 I40E_AQC_WRITE_TYPE_LAA_ONLY,
1329                                                 addr->sa_data, NULL);
1330                 if (ret) {
1331                         netdev_info(netdev,
1332                                     "Addr change for Main VSI failed: %d\n",
1333                                     ret);
1334                         return -EADDRNOTAVAIL;
1335                 }
1336
1337                 ether_addr_copy(vsi->back->hw.mac.addr, addr->sa_data);
1338         }
1339
1340         /* In order to be sure to not drop any packets, add the new address
1341          * then delete the old one.
1342          */
1343         f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, false, false);
1344         if (!f)
1345                 return -ENOMEM;
1346
1347         i40e_sync_vsi_filters(vsi);
1348         i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false);
1349         i40e_sync_vsi_filters(vsi);
1350
1351         ether_addr_copy(netdev->dev_addr, addr->sa_data);
1352
1353         return 0;
1354 }
1355
/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 *
 * Fills ctxt->info.tc_mapping[] / queue_mapping[] for the AdminQ VSI
 * context and records the per-TC offset/count in vsi->tc_config; also
 * sets vsi->num_queue_pairs to the total number of queues assigned.
 **/
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 0;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;
	u16 num_tc_qps = 0;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & (1 << i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
	} else {
		/* At least TC0 is enabled in case of non-DCB case */
		numtc = 1;
	}

	vsi->tc_config.numtc = numtc;
	/* an all-zero bitmap is normalized to "TC0 only" */
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	/* Number of queues per enabled TC */
	num_tc_qps = vsi->alloc_queue_pairs/numtc;
	num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				/* the main VSI is also bounded by the RSS size */
				qcount = min_t(int, pf->rss_size, num_tc_qps);
				break;
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				/* these VSI types only ever carry TC0 */
				qcount = num_tc_qps;
				WARN_ON(i != 0);
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the power-of-2 of the number of queue pairs */
			num_qps = qcount;
			pow = 0;
			while (num_qps && ((1 << pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			/* qmap packs the TC's first queue and its queue
			 * count (as a power-of-2 exponent) into one word
			 */
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		/* SRIOV VSIs list every queue explicitly (non-contiguous) */
		ctxt->info.mapping_flags |=
				     cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
					       cpu_to_le16(vsi->base_queue + i);
	} else {
		/* everyone else uses a contiguous range from base_queue */
		ctxt->info.mapping_flags |=
					cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
1475
/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 *
 * Reconciles the VSI MAC filter list with the netdev's unicast and
 * multicast address lists: adds filters for addresses the netdev has
 * but the VSI lacks, then drops netdev-owned filters for addresses the
 * netdev no longer carries.  Actual hardware programming happens later
 * in the filter sync subtask.
 **/
static void i40e_set_rx_mode(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_vsi *vsi = np->vsi;
	struct netdev_hw_addr *uca;
	struct netdev_hw_addr *mca;
	struct netdev_hw_addr *ha;

	/* add addr if not already in the filter list */
	netdev_for_each_uc_addr(uca, netdev) {
		if (!i40e_find_mac(vsi, uca->addr, false, true)) {
			/* in vlan mode a filter per known vlan is needed,
			 * otherwise one filter matching any vlan suffices
			 */
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, uca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	/* same treatment for the multicast address list */
	netdev_for_each_mc_addr(mca, netdev) {
		if (!i40e_find_mac(vsi, mca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, mca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	/* remove filter if not in netdev list */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		bool found = false;

		/* only netdev-owned filters are subject to removal here */
		if (!f->is_netdev)
			continue;

		if (is_multicast_ether_addr(f->macaddr)) {
			netdev_for_each_mc_addr(mca, netdev) {
				if (ether_addr_equal(mca->addr, f->macaddr)) {
					found = true;
					break;
				}
			}
		} else {
			/* unicast: check both the uc list and the device's
			 * own address list
			 */
			netdev_for_each_uc_addr(uca, netdev) {
				if (ether_addr_equal(uca->addr, f->macaddr)) {
					found = true;
					break;
				}
			}

			for_each_dev_addr(netdev, ha) {
				if (ether_addr_equal(ha->addr, f->macaddr)) {
					found = true;
					break;
				}
			}
		}
		if (!found)
			i40e_del_filter(
			   vsi, f->macaddr, I40E_VLAN_ANY, false, true);
	}

	/* check for other flag changes */
	if (vsi->current_netdev_flags != vsi->netdev->flags) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}
1552
1553 /**
1554  * i40e_sync_vsi_filters - Update the VSI filter list to the HW
1555  * @vsi: ptr to the VSI
1556  *
1557  * Push any outstanding VSI filter changes through the AdminQ.
1558  *
1559  * Returns 0 or error value
1560  **/
1561 int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1562 {
1563         struct i40e_mac_filter *f, *ftmp;
1564         bool promisc_forced_on = false;
1565         bool add_happened = false;
1566         int filter_list_len = 0;
1567         u32 changed_flags = 0;
1568         i40e_status aq_ret = 0;
1569         struct i40e_pf *pf;
1570         int num_add = 0;
1571         int num_del = 0;
1572         u16 cmd_flags;
1573
1574         /* empty array typed pointers, kcalloc later */
1575         struct i40e_aqc_add_macvlan_element_data *add_list;
1576         struct i40e_aqc_remove_macvlan_element_data *del_list;
1577
1578         while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
1579                 usleep_range(1000, 2000);
1580         pf = vsi->back;
1581
1582         if (vsi->netdev) {
1583                 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
1584                 vsi->current_netdev_flags = vsi->netdev->flags;
1585         }
1586
1587         if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
1588                 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
1589
1590                 filter_list_len = pf->hw.aq.asq_buf_size /
1591                             sizeof(struct i40e_aqc_remove_macvlan_element_data);
1592                 del_list = kcalloc(filter_list_len,
1593                             sizeof(struct i40e_aqc_remove_macvlan_element_data),
1594                             GFP_KERNEL);
1595                 if (!del_list)
1596                         return -ENOMEM;
1597
1598                 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1599                         if (!f->changed)
1600                                 continue;
1601
1602                         if (f->counter != 0)
1603                                 continue;
1604                         f->changed = false;
1605                         cmd_flags = 0;
1606
1607                         /* add to delete list */
1608                         ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
1609                         del_list[num_del].vlan_tag =
1610                                 cpu_to_le16((u16)(f->vlan ==
1611                                             I40E_VLAN_ANY ? 0 : f->vlan));
1612
1613                         cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1614                         del_list[num_del].flags = cmd_flags;
1615                         num_del++;
1616
1617                         /* unlink from filter list */
1618                         list_del(&f->list);
1619                         kfree(f);
1620
1621                         /* flush a full buffer */
1622                         if (num_del == filter_list_len) {
1623                                 aq_ret = i40e_aq_remove_macvlan(&pf->hw,
1624                                             vsi->seid, del_list, num_del,
1625                                             NULL);
1626                                 num_del = 0;
1627                                 memset(del_list, 0, sizeof(*del_list));
1628
1629                                 if (aq_ret &&
1630                                     pf->hw.aq.asq_last_status !=
1631                                                               I40E_AQ_RC_ENOENT)
1632                                         dev_info(&pf->pdev->dev,
1633                                                  "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
1634                                                  aq_ret,
1635                                                  pf->hw.aq.asq_last_status);
1636                         }
1637                 }
1638                 if (num_del) {
1639                         aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
1640                                                      del_list, num_del, NULL);
1641                         num_del = 0;
1642
1643                         if (aq_ret &&
1644                             pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT)
1645                                 dev_info(&pf->pdev->dev,
1646                                          "ignoring delete macvlan error, err %d, aq_err %d\n",
1647                                          aq_ret, pf->hw.aq.asq_last_status);
1648                 }
1649
1650                 kfree(del_list);
1651                 del_list = NULL;
1652
1653                 /* do all the adds now */
1654                 filter_list_len = pf->hw.aq.asq_buf_size /
1655                                sizeof(struct i40e_aqc_add_macvlan_element_data),
1656                 add_list = kcalloc(filter_list_len,
1657                                sizeof(struct i40e_aqc_add_macvlan_element_data),
1658                                GFP_KERNEL);
1659                 if (!add_list)
1660                         return -ENOMEM;
1661
1662                 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1663                         if (!f->changed)
1664                                 continue;
1665
1666                         if (f->counter == 0)
1667                                 continue;
1668                         f->changed = false;
1669                         add_happened = true;
1670                         cmd_flags = 0;
1671
1672                         /* add to add array */
1673                         ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
1674                         add_list[num_add].vlan_tag =
1675                                 cpu_to_le16(
1676                                  (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
1677                         add_list[num_add].queue_number = 0;
1678
1679                         cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
1680                         add_list[num_add].flags = cpu_to_le16(cmd_flags);
1681                         num_add++;
1682
1683                         /* flush a full buffer */
1684                         if (num_add == filter_list_len) {
1685                                 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1686                                                              add_list, num_add,
1687                                                              NULL);
1688                                 num_add = 0;
1689
1690                                 if (aq_ret)
1691                                         break;
1692                                 memset(add_list, 0, sizeof(*add_list));
1693                         }
1694                 }
1695                 if (num_add) {
1696                         aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1697                                                      add_list, num_add, NULL);
1698                         num_add = 0;
1699                 }
1700                 kfree(add_list);
1701                 add_list = NULL;
1702
1703                 if (add_happened && (!aq_ret)) {
1704                         /* do nothing */;
1705                 } else if (add_happened && (aq_ret)) {
1706                         dev_info(&pf->pdev->dev,
1707                                  "add filter failed, err %d, aq_err %d\n",
1708                                  aq_ret, pf->hw.aq.asq_last_status);
1709                         if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
1710                             !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1711                                       &vsi->state)) {
1712                                 promisc_forced_on = true;
1713                                 set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1714                                         &vsi->state);
1715                                 dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
1716                         }
1717                 }
1718         }
1719
1720         /* check for changes in promiscuous modes */
1721         if (changed_flags & IFF_ALLMULTI) {
1722                 bool cur_multipromisc;
1723                 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
1724                 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
1725                                                                vsi->seid,
1726                                                                cur_multipromisc,
1727                                                                NULL);
1728                 if (aq_ret)
1729                         dev_info(&pf->pdev->dev,
1730                                  "set multi promisc failed, err %d, aq_err %d\n",
1731                                  aq_ret, pf->hw.aq.asq_last_status);
1732         }
1733         if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
1734                 bool cur_promisc;
1735                 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
1736                                test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1737                                         &vsi->state));
1738                 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
1739                                                              vsi->seid,
1740                                                              cur_promisc, NULL);
1741                 if (aq_ret)
1742                         dev_info(&pf->pdev->dev,
1743                                  "set uni promisc failed, err %d, aq_err %d\n",
1744                                  aq_ret, pf->hw.aq.asq_last_status);
1745                 aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
1746                                                    vsi->seid,
1747                                                    cur_promisc, NULL);
1748                 if (aq_ret)
1749                         dev_info(&pf->pdev->dev,
1750                                  "set brdcast promisc failed, err %d, aq_err %d\n",
1751                                  aq_ret, pf->hw.aq.asq_last_status);
1752         }
1753
1754         clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
1755         return 0;
1756 }
1757
1758 /**
1759  * i40e_sync_filters_subtask - Sync the VSI filter list with HW
1760  * @pf: board private structure
1761  **/
1762 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
1763 {
1764         int v;
1765
1766         if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
1767                 return;
1768         pf->flags &= ~I40E_FLAG_FILTER_SYNC;
1769
1770         for (v = 0; v < pf->num_alloc_vsi; v++) {
1771                 if (pf->vsi[v] &&
1772                     (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
1773                         i40e_sync_vsi_filters(pf->vsi[v]);
1774         }
1775 }
1776
1777 /**
1778  * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
1779  * @netdev: network interface device structure
1780  * @new_mtu: new value for maximum frame size
1781  *
1782  * Returns 0 on success, negative on failure
1783  **/
1784 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
1785 {
1786         struct i40e_netdev_priv *np = netdev_priv(netdev);
1787         int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
1788         struct i40e_vsi *vsi = np->vsi;
1789
1790         /* MTU < 68 is an error and causes problems on some kernels */
1791         if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
1792                 return -EINVAL;
1793
1794         netdev_info(netdev, "changing MTU from %d to %d\n",
1795                     netdev->mtu, new_mtu);
1796         netdev->mtu = new_mtu;
1797         if (netif_running(netdev))
1798                 i40e_vsi_reinit_locked(vsi);
1799
1800         return 0;
1801 }
1802
1803 /**
1804  * i40e_ioctl - Access the hwtstamp interface
1805  * @netdev: network interface device structure
1806  * @ifr: interface request data
1807  * @cmd: ioctl command
1808  **/
1809 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1810 {
1811         struct i40e_netdev_priv *np = netdev_priv(netdev);
1812         struct i40e_pf *pf = np->vsi->back;
1813
1814         switch (cmd) {
1815         case SIOCGHWTSTAMP:
1816                 return i40e_ptp_get_ts_config(pf, ifr);
1817         case SIOCSHWTSTAMP:
1818                 return i40e_ptp_set_ts_config(pf, ifr);
1819         default:
1820                 return -EOPNOTSUPP;
1821         }
1822 }
1823
1824 /**
1825  * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
1826  * @vsi: the vsi being adjusted
1827  **/
1828 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
1829 {
1830         struct i40e_vsi_context ctxt;
1831         i40e_status ret;
1832
1833         if ((vsi->info.valid_sections &
1834              cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
1835             ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
1836                 return;  /* already enabled */
1837
1838         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
1839         vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1840                                     I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
1841
1842         ctxt.seid = vsi->seid;
1843         memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1844         ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
1845         if (ret) {
1846                 dev_info(&vsi->back->pdev->dev,
1847                          "%s: update vsi failed, aq_err=%d\n",
1848                          __func__, vsi->back->hw.aq.asq_last_status);
1849         }
1850 }
1851
1852 /**
1853  * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
1854  * @vsi: the vsi being adjusted
1855  **/
1856 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
1857 {
1858         struct i40e_vsi_context ctxt;
1859         i40e_status ret;
1860
1861         if ((vsi->info.valid_sections &
1862              cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
1863             ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
1864              I40E_AQ_VSI_PVLAN_EMOD_MASK))
1865                 return;  /* already disabled */
1866
1867         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
1868         vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1869                                     I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
1870
1871         ctxt.seid = vsi->seid;
1872         memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1873         ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
1874         if (ret) {
1875                 dev_info(&vsi->back->pdev->dev,
1876                          "%s: update vsi failed, aq_err=%d\n",
1877                          __func__, vsi->back->hw.aq.asq_last_status);
1878         }
1879 }
1880
1881 /**
1882  * i40e_vlan_rx_register - Setup or shutdown vlan offload
1883  * @netdev: network interface to be adjusted
1884  * @features: netdev features to test if VLAN offload is enabled or not
1885  **/
1886 static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
1887 {
1888         struct i40e_netdev_priv *np = netdev_priv(netdev);
1889         struct i40e_vsi *vsi = np->vsi;
1890
1891         if (features & NETIF_F_HW_VLAN_CTAG_RX)
1892                 i40e_vlan_stripping_enable(vsi);
1893         else
1894                 i40e_vlan_stripping_disable(vsi);
1895 }
1896
/**
 * i40e_vsi_add_vlan - Add vsi membership for given vlan
 * @vsi: the vsi being configured
 * @vid: vlan id to be added (0 = untagged only , -1 = any)
 *
 * Adds a MAC/VLAN filter for @vid paired with the netdev address and
 * every MAC already in the VSI's filter list, then demotes any existing
 * I40E_VLAN_ANY filters to vlan 0 so the VSI accepts untagged plus the
 * specific tag(s) rather than everything.  Finishes with a HW sync
 * unless the device is down or a reset is pending.
 *
 * Returns 0 on success, -ENOMEM if a filter could not be allocated.
 **/
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
{
	struct i40e_mac_filter *f, *add_f;
	bool is_netdev, is_vf;

	is_vf = (vsi->type == I40E_VSI_SRIOV);
	is_netdev = !!(vsi->netdev);

	/* pair the new vlan with the netdev's own MAC address */
	if (is_netdev) {
		add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
					is_vf, is_netdev);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, vsi->netdev->dev_addr);
			return -ENOMEM;
		}
	}

	/* pair the new vlan with every MAC already known to the VSI */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, f->macaddr);
			return -ENOMEM;
		}
	}

	/* Now if we add a vlan tag, make sure to check if it is the first
	 * tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
	 * with 0, so we now accept untagged and specified tagged traffic
	 * (and not any taged and untagged)
	 */
	if (vid > 0) {
		if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
						  I40E_VLAN_ANY,
						  is_vf, is_netdev)) {
			i40e_del_filter(vsi, vsi->netdev->dev_addr,
					I40E_VLAN_ANY, is_vf, is_netdev);
			add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
						is_vf, is_netdev);
			if (!add_f) {
				dev_info(&vsi->back->pdev->dev,
					 "Could not add filter 0 for %pM\n",
					 vsi->netdev->dev_addr);
				return -ENOMEM;
			}
		}
	}

	/* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
	if (vid > 0 && !vsi->info.pvid) {
		list_for_each_entry(f, &vsi->mac_filter_list, list) {
			if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
					     is_vf, is_netdev)) {
				i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
						is_vf, is_netdev);
				add_f = i40e_add_filter(vsi, f->macaddr,
							0, is_vf, is_netdev);
				if (!add_f) {
					dev_info(&vsi->back->pdev->dev,
						 "Could not add filter 0 for %pM\n",
						 f->macaddr);
					return -ENOMEM;
				}
			}
		}
	}

	/* skip the HW sync while down or mid-reset; bringup redoes it */
	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		return 0;

	return i40e_sync_vsi_filters(vsi);
}
1978
/**
 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
 * @vsi: the vsi being configured
 * @vid: vlan id to be removed (0 = untagged only , -1 = any)
 *
 * Drops the @vid filter for the netdev address and for every MAC in
 * the filter list.  If no tagged filters remain afterwards, the vlan-0
 * filters are promoted back to I40E_VLAN_ANY so the VSI accepts any
 * traffic again.  Finishes with a HW sync unless the device is down or
 * a reset is pending.
 *
 * Return: 0 on success or negative otherwise
 **/
int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
{
	struct net_device *netdev = vsi->netdev;
	struct i40e_mac_filter *f, *add_f;
	bool is_vf, is_netdev;
	int filter_count = 0;

	is_vf = (vsi->type == I40E_VSI_SRIOV);
	is_netdev = !!(netdev);

	/* remove the vid pairing for the netdev MAC and all list MACs */
	if (is_netdev)
		i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);

	list_for_each_entry(f, &vsi->mac_filter_list, list)
		i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);

	/* go through all the filters for this VSI and if there is only
	 * vid == 0 it means there are no other filters, so vid 0 must
	 * be replaced with -1. This signifies that we should from now
	 * on accept any traffic (with any tag present, or untagged)
	 */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (is_netdev) {
			/* NOTE(review): a tagged filter on the netdev's own
			 * address is counted both here and by the
			 * unconditional check below, i.e. twice — confirm
			 * this double count is intended
			 */
			if (f->vlan &&
			    ether_addr_equal(netdev->dev_addr, f->macaddr))
				filter_count++;
		}

		if (f->vlan)
			filter_count++;
	}

	/* no tagged filters remain: swap the netdev's vlan-0-only filter
	 * for an accept-any-vlan filter
	 */
	if (!filter_count && is_netdev) {
		i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
		f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
				    is_vf, is_netdev);
		if (!f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add filter %d for %pM\n",
				 I40E_VLAN_ANY, netdev->dev_addr);
			return -ENOMEM;
		}
	}

	/* same promotion for every other MAC in the filter list */
	if (!filter_count) {
		list_for_each_entry(f, &vsi->mac_filter_list, list) {
			i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
			add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
					    is_vf, is_netdev);
			if (!add_f) {
				dev_info(&vsi->back->pdev->dev,
					 "Could not add filter %d for %pM\n",
					 I40E_VLAN_ANY, f->macaddr);
				return -ENOMEM;
			}
		}
	}

	/* skip the HW sync while down or mid-reset; bringup redoes it */
	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		return 0;

	return i40e_sync_vsi_filters(vsi);
}
2050
2051 /**
2052  * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2053  * @netdev: network interface to be adjusted
2054  * @vid: vlan id to be added
2055  *
2056  * net_device_ops implementation for adding vlan ids
2057  **/
2058 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2059                                 __always_unused __be16 proto, u16 vid)
2060 {
2061         struct i40e_netdev_priv *np = netdev_priv(netdev);
2062         struct i40e_vsi *vsi = np->vsi;
2063         int ret = 0;
2064
2065         if (vid > 4095)
2066                 return -EINVAL;
2067
2068         netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
2069
2070         /* If the network stack called us with vid = 0 then
2071          * it is asking to receive priority tagged packets with
2072          * vlan id 0.  Our HW receives them by default when configured
2073          * to receive untagged packets so there is no need to add an
2074          * extra filter for vlan 0 tagged packets.
2075          */
2076         if (vid)
2077                 ret = i40e_vsi_add_vlan(vsi, vid);
2078
2079         if (!ret && (vid < VLAN_N_VID))
2080                 set_bit(vid, vsi->active_vlans);
2081
2082         return ret;
2083 }
2084
2085 /**
2086  * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2087  * @netdev: network interface to be adjusted
2088  * @vid: vlan id to be removed
2089  *
2090  * net_device_ops implementation for removing vlan ids
2091  **/
2092 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2093                                  __always_unused __be16 proto, u16 vid)
2094 {
2095         struct i40e_netdev_priv *np = netdev_priv(netdev);
2096         struct i40e_vsi *vsi = np->vsi;
2097
2098         netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);
2099
2100         /* return code is ignored as there is nothing a user
2101          * can do about failure to remove and a log message was
2102          * already printed from the other function
2103          */
2104         i40e_vsi_kill_vlan(vsi, vid);
2105
2106         clear_bit(vid, vsi->active_vlans);
2107
2108         return 0;
2109 }
2110
2111 /**
2112  * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2113  * @vsi: the vsi being brought back up
2114  **/
2115 static void i40e_restore_vlan(struct i40e_vsi *vsi)
2116 {
2117         u16 vid;
2118
2119         if (!vsi->netdev)
2120                 return;
2121
2122         i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
2123
2124         for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2125                 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
2126                                      vid);
2127 }
2128
2129 /**
2130  * i40e_vsi_add_pvid - Add pvid for the VSI
2131  * @vsi: the vsi being adjusted
2132  * @vid: the vlan id to set as a PVID
2133  **/
2134 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2135 {
2136         struct i40e_vsi_context ctxt;
2137         i40e_status aq_ret;
2138
2139         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2140         vsi->info.pvid = cpu_to_le16(vid);
2141         vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2142                                     I40E_AQ_VSI_PVLAN_INSERT_PVID |
2143                                     I40E_AQ_VSI_PVLAN_EMOD_STR;
2144
2145         ctxt.seid = vsi->seid;
2146         memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2147         aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2148         if (aq_ret) {
2149                 dev_info(&vsi->back->pdev->dev,
2150                          "%s: update vsi failed, aq_err=%d\n",
2151                          __func__, vsi->back->hw.aq.asq_last_status);
2152                 return -ENOENT;
2153         }
2154
2155         return 0;
2156 }
2157
/**
 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
 * @vsi: the vsi being adjusted
 *
 * Just use the vlan_rx_register() service to put it back to normal
 **/
void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
{
	/* undo the stripping/insert mode set by i40e_vsi_add_pvid(),
	 * then forget the pvid itself
	 */
	i40e_vlan_stripping_disable(vsi);

	vsi->info.pvid = 0;
}
2170
2171 /**
2172  * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2173  * @vsi: ptr to the VSI
2174  *
2175  * If this function returns with an error, then it's possible one or
2176  * more of the rings is populated (while the rest are not).  It is the
2177  * callers duty to clean those orphaned rings.
2178  *
2179  * Return 0 on success, negative on failure
2180  **/
2181 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2182 {
2183         int i, err = 0;
2184
2185         for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2186                 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
2187
2188         return err;
2189 }
2190
2191 /**
2192  * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2193  * @vsi: ptr to the VSI
2194  *
2195  * Free VSI's transmit software resources
2196  **/
2197 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2198 {
2199         int i;
2200
2201         if (!vsi->tx_rings)
2202                 return;
2203
2204         for (i = 0; i < vsi->num_queue_pairs; i++)
2205                 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2206                         i40e_free_tx_resources(vsi->tx_rings[i]);
2207 }
2208
2209 /**
2210  * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
2211  * @vsi: ptr to the VSI
2212  *
2213  * If this function returns with an error, then it's possible one or
2214  * more of the rings is populated (while the rest are not).  It is the
2215  * callers duty to clean those orphaned rings.
2216  *
2217  * Return 0 on success, negative on failure
2218  **/
2219 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2220 {
2221         int i, err = 0;
2222
2223         for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2224                 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
2225         return err;
2226 }
2227
2228 /**
2229  * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
2230  * @vsi: ptr to the VSI
2231  *
2232  * Free all receive software resources
2233  **/
2234 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2235 {
2236         int i;
2237
2238         if (!vsi->rx_rings)
2239                 return;
2240
2241         for (i = 0; i < vsi->num_queue_pairs; i++)
2242                 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2243                         i40e_free_rx_resources(vsi->rx_rings[i]);
2244 }
2245
2246 /**
2247  * i40e_configure_tx_ring - Configure a transmit ring context and rest
2248  * @ring: The Tx ring to configure
2249  *
2250  * Configure the Tx descriptor ring in the HMC context.
2251  **/
2252 static int i40e_configure_tx_ring(struct i40e_ring *ring)
2253 {
2254         struct i40e_vsi *vsi = ring->vsi;
2255         u16 pf_q = vsi->base_queue + ring->queue_index;
2256         struct i40e_hw *hw = &vsi->back->hw;
2257         struct i40e_hmc_obj_txq tx_ctx;
2258         i40e_status err = 0;
2259         u32 qtx_ctl = 0;
2260
2261         /* some ATR related tx ring init */
2262         if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
2263                 ring->atr_sample_rate = vsi->back->atr_sample_rate;
2264                 ring->atr_count = 0;
2265         } else {
2266                 ring->atr_sample_rate = 0;
2267         }
2268
2269         /* initialize XPS */
2270         if (ring->q_vector && ring->netdev &&
2271             vsi->tc_config.numtc <= 1 &&
2272             !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
2273                 netif_set_xps_queue(ring->netdev,
2274                                     &ring->q_vector->affinity_mask,
2275                                     ring->queue_index);
2276
2277         /* clear the context structure first */
2278         memset(&tx_ctx, 0, sizeof(tx_ctx));
2279
2280         tx_ctx.new_context = 1;
2281         tx_ctx.base = (ring->dma / 128);
2282         tx_ctx.qlen = ring->count;
2283         tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
2284                                                I40E_FLAG_FD_ATR_ENABLED));
2285         tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
2286         /* FDIR VSI tx ring can still use RS bit and writebacks */
2287         if (vsi->type != I40E_VSI_FDIR)
2288                 tx_ctx.head_wb_ena = 1;
2289         tx_ctx.head_wb_addr = ring->dma +
2290                               (ring->count * sizeof(struct i40e_tx_desc));
2291
2292         /* As part of VSI creation/update, FW allocates certain
2293          * Tx arbitration queue sets for each TC enabled for
2294          * the VSI. The FW returns the handles to these queue
2295          * sets as part of the response buffer to Add VSI,
2296          * Update VSI, etc. AQ commands. It is expected that
2297          * these queue set handles be associated with the Tx
2298          * queues by the driver as part of the TX queue context
2299          * initialization. This has to be done regardless of
2300          * DCB as by default everything is mapped to TC0.
2301          */
2302         tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
2303         tx_ctx.rdylist_act = 0;
2304
2305         /* clear the context in the HMC */
2306         err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2307         if (err) {
2308                 dev_info(&vsi->back->pdev->dev,
2309                          "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2310                          ring->queue_index, pf_q, err);
2311                 return -ENOMEM;
2312         }
2313
2314         /* set the context in the HMC */
2315         err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2316         if (err) {
2317                 dev_info(&vsi->back->pdev->dev,
2318                          "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
2319                          ring->queue_index, pf_q, err);
2320                 return -ENOMEM;
2321         }
2322
2323         /* Now associate this queue with this PCI function */
2324         if (vsi->type == I40E_VSI_VMDQ2)
2325                 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
2326         else
2327                 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
2328         qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2329                     I40E_QTX_CTL_PF_INDX_MASK);
2330         wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2331         i40e_flush(hw);
2332
2333         clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
2334
2335         /* cache tail off for easier writes later */
2336         ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
2337
2338         return 0;
2339 }
2340
/**
 * i40e_configure_rx_ring - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in the HMC context.
 *
 * Returns 0 on success, -ENOMEM if the HMC queue context could not be
 * cleared or written.
 **/
static int i40e_configure_rx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
	u16 pf_q = vsi->base_queue + ring->queue_index;
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	i40e_status err = 0;

	ring->state = 0;

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(rx_ctx));

	/* pull the buffer sizing chosen at the VSI level */
	ring->rx_buf_len = vsi->rx_buf_len;
	ring->rx_hdr_len = vsi->rx_hdr_len;

	/* context stores buffer sizes in shifted (chunked) units */
	rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
	rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;

	/* ring base address is programmed in 128-byte units */
	rx_ctx.base = (ring->dma / 128);
	rx_ctx.qlen = ring->count;

	/* dsize selects the 16-byte (0) vs 32-byte (1) descriptor format */
	if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
		set_ring_16byte_desc_enabled(ring);
		rx_ctx.dsize = 0;
	} else {
		rx_ctx.dsize = 1;
	}

	/* non-zero dtype means some flavor of header split is in use */
	rx_ctx.dtype = vsi->dtype;
	if (vsi->dtype) {
		set_ring_ps_enabled(ring);
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
	} else {
		rx_ctx.hsplit_0 = 0;
	}

	/* max receive size: the smaller of the VSI frame limit and what
	 * a full buffer chain can hold
	 */
	rx_ctx.rxmax = min_t(u16, vsi->max_frame,
				  (chain_len * ring->rx_buf_len));
	/* enable TPH (TLP processing hints) for descriptors, data, head */
	rx_ctx.tphrdesc_ena = 1;
	rx_ctx.tphwdesc_ena = 1;
	rx_ctx.tphdata_ena = 1;
	rx_ctx.tphhead_ena = 1;
	/* revision 0 silicon gets a low-Rx-queue threshold of 0 */
	if (hw->revision_id == 0)
		rx_ctx.lrxqthresh = 0;
	else
		rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 1;
	rx_ctx.l2tsel = 1;
	rx_ctx.showiv = 1;
	/* set the prefena field to 1 because the manual says to */
	rx_ctx.prefena = 1;

	/* clear the context in the HMC */
	err = i40e_clear_lan_rx_queue_context(hw, pf_q);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* set the context in the HMC */
	err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* cache tail for quicker writes, and clear the reg before use */
	ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
	writel(0, ring->tail);

	/* pre-populate the ring with receive buffers */
	i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));

	return 0;
}
2430
2431 /**
2432  * i40e_vsi_configure_tx - Configure the VSI for Tx
2433  * @vsi: VSI structure describing this set of rings and resources
2434  *
2435  * Configure the Tx VSI for operation.
2436  **/
2437 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
2438 {
2439         int err = 0;
2440         u16 i;
2441
2442         for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
2443                 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
2444
2445         return err;
2446 }
2447
2448 /**
2449  * i40e_vsi_configure_rx - Configure the VSI for Rx
2450  * @vsi: the VSI being configured
2451  *
2452  * Configure the Rx VSI for operation.
2453  **/
2454 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
2455 {
2456         int err = 0;
2457         u16 i;
2458
2459         if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
2460                 vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
2461                                + ETH_FCS_LEN + VLAN_HLEN;
2462         else
2463                 vsi->max_frame = I40E_RXBUFFER_2048;
2464
2465         /* figure out correct receive buffer length */
2466         switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
2467                                     I40E_FLAG_RX_PS_ENABLED)) {
2468         case I40E_FLAG_RX_1BUF_ENABLED:
2469                 vsi->rx_hdr_len = 0;
2470                 vsi->rx_buf_len = vsi->max_frame;
2471                 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
2472                 break;
2473         case I40E_FLAG_RX_PS_ENABLED:
2474                 vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2475                 vsi->rx_buf_len = I40E_RXBUFFER_2048;
2476                 vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
2477                 break;
2478         default:
2479                 vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2480                 vsi->rx_buf_len = I40E_RXBUFFER_2048;
2481                 vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
2482                 break;
2483         }
2484
2485         /* round up for the chip's needs */
2486         vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
2487                                 (1 << I40E_RXQ_CTX_HBUFF_SHIFT));
2488         vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
2489                                 (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
2490
2491         /* set up individual rings */
2492         for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2493                 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
2494
2495         return err;
2496 }
2497
2498 /**
2499  * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
2500  * @vsi: ptr to the VSI
2501  **/
2502 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
2503 {
2504         struct i40e_ring *tx_ring, *rx_ring;
2505         u16 qoffset, qcount;
2506         int i, n;
2507
2508         if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2509                 return;
2510
2511         for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
2512                 if (!(vsi->tc_config.enabled_tc & (1 << n)))
2513                         continue;
2514
2515                 qoffset = vsi->tc_config.tc_info[n].qoffset;
2516                 qcount = vsi->tc_config.tc_info[n].qcount;
2517                 for (i = qoffset; i < (qoffset + qcount); i++) {
2518                         rx_ring = vsi->rx_rings[i];
2519                         tx_ring = vsi->tx_rings[i];
2520                         rx_ring->dcb_tc = n;
2521                         tx_ring->dcb_tc = n;
2522                 }
2523         }
2524 }
2525
2526 /**
2527  * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
2528  * @vsi: ptr to the VSI
2529  **/
2530 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
2531 {
2532         if (vsi->netdev)
2533                 i40e_set_rx_mode(vsi->netdev);
2534 }
2535
2536 /**
2537  * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
2538  * @vsi: Pointer to the targeted VSI
2539  *
2540  * This function replays the hlist on the hw where all the SB Flow Director
2541  * filters were saved.
2542  **/
2543 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
2544 {
2545         struct i40e_fdir_filter *filter;
2546         struct i40e_pf *pf = vsi->back;
2547         struct hlist_node *node;
2548
2549         if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
2550                 return;
2551
2552         hlist_for_each_entry_safe(filter, node,
2553                                   &pf->fdir_filter_list, fdir_node) {
2554                 i40e_add_del_fdir(vsi, filter, true);
2555         }
2556 }
2557
/**
 * i40e_vsi_configure - Set up the VSI for action
 * @vsi: the VSI being configured
 **/
static int i40e_vsi_configure(struct i40e_vsi *vsi)
{
	int ret;

	/* rx mode, VLANs and DCB mapping first, then the ring contexts */
	i40e_set_vsi_rx_mode(vsi);
	i40e_restore_vlan(vsi);
	i40e_vsi_config_dcb_rings(vsi);

	ret = i40e_vsi_configure_tx(vsi);
	if (ret)
		return ret;

	return i40e_vsi_configure_rx(vsi);
}
2575
/**
 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
 * @vsi: the VSI being configured
 *
 * Programs the per-vector Rx/Tx ITR registers and, for each vector,
 * builds the hardware linked list of interrupt causes: Rx cause ->
 * same pair's Tx cause -> next pair's Rx cause -> ... -> end of list.
 **/
static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_q_vector *q_vector;
	struct i40e_hw *hw = &pf->hw;
	u16 vector;
	int i, q;
	u32 val;
	u32 qp;

	/* The interrupt indexing is offset by 1 in the PFINT_ITRn
	 * and PFINT_LNKLSTn registers, e.g.:
	 *   PFINT_ITRn[0..n-1] gets msix-1..msix-n  (qpair interrupts)
	 */
	qp = vsi->base_queue;
	vector = vsi->base_vector;
	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
		q_vector = vsi->q_vectors[i];
		/* set Rx and Tx interrupt throttle rates for this vector */
		q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
		q_vector->rx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
		     q_vector->rx.itr);
		q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
		q_vector->tx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
		     q_vector->tx.itr);

		/* Linked list for the queuepairs assigned to this vector */
		wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
		for (q = 0; q < q_vector->num_ringpairs; q++) {
			/* Rx cause: next list element is this pair's Tx */
			val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
			      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)  |
			      (vector      << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
			      (qp          << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
			      (I40E_QUEUE_TYPE_TX
				      << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);

			wr32(hw, I40E_QINT_RQCTL(qp), val);

			/* Tx cause: next list element is the next pair's Rx */
			val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
			      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)  |
			      (vector      << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
			      ((qp+1)      << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
			      (I40E_QUEUE_TYPE_RX
				      << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

			/* Terminate the linked list */
			if (q == (q_vector->num_ringpairs - 1))
				val |= (I40E_QUEUE_END_OF_LIST
					   << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);

			wr32(hw, I40E_QINT_TQCTL(qp), val);
			qp++;
		}
	}

	i40e_flush(hw);
}
2638
/**
 * i40e_enable_misc_int_causes - enable the non-queue interrupts
 * @hw: ptr to the hardware info
 *
 * Clears any latched causes, then enables the set of "other" (non
 * queue-pair) interrupt causes in PFINT_ICR0_ENA and resets the
 * software/other ITR indices.
 **/
static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
{
	u32 val;

	/* clear things first */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	/* causes we want reported: errors, resets, GPIO, PTP, VFLR, AdminQ */
	val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK       |
	      I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK    |
	      I40E_PFINT_ICR0_ENA_GRST_MASK          |
	      I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
	      I40E_PFINT_ICR0_ENA_GPIO_MASK          |
	      I40E_PFINT_ICR0_ENA_TIMESYNC_MASK      |
	      I40E_PFINT_ICR0_ENA_HMC_ERR_MASK       |
	      I40E_PFINT_ICR0_ENA_VFLR_MASK          |
	      I40E_PFINT_ICR0_ENA_ADMINQ_MASK;

	wr32(hw, I40E_PFINT_ICR0_ENA, val);

	/* SW_ITR_IDX = 0, but don't change INTENA */
	wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
					I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	/* OTHER_ITR_IDX = 0 */
	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}
2670
/**
 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
 * @vsi: the VSI being configured
 *
 * In MSI/legacy mode a single vector serves everything: program its ITR
 * values, enable the misc (non-queue) causes, and link queue pair 0 to it.
 **/
static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
{
	struct i40e_q_vector *q_vector = vsi->q_vectors[0];
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	/* set the ITR configuration */
	q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
	q_vector->rx.latency_range = I40E_LOW_LATENCY;
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
	q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
	q_vector->tx.latency_range = I40E_LOW_LATENCY;
	wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);

	i40e_enable_misc_int_causes(hw);

	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
	wr32(hw, I40E_PFINT_LNKLST0, 0);

	/* Associate the queue pair to the vector and enable the queue int */
	/* NOTE(review): this RQCTL value is built with the TQCTL
	 * NEXTQ_TYPE shift; the two shifts appear interchangeable but
	 * confirm against the register spec - the RQCTL shift would be
	 * the clearer choice.
	 */
	val = I40E_QINT_RQCTL_CAUSE_ENA_MASK                  |
	      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
	      (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

	wr32(hw, I40E_QINT_RQCTL(0), val);

	/* Tx cause terminates the list */
	val = I40E_QINT_TQCTL_CAUSE_ENA_MASK                  |
	      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
	      (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);

	wr32(hw, I40E_QINT_TQCTL(0), val);
	i40e_flush(hw);
}
2709
2710 /**
2711  * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
2712  * @pf: board private structure
2713  **/
2714 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
2715 {
2716         struct i40e_hw *hw = &pf->hw;
2717
2718         wr32(hw, I40E_PFINT_DYN_CTL0,
2719              I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2720         i40e_flush(hw);
2721 }
2722
2723 /**
2724  * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
2725  * @pf: board private structure
2726  **/
2727 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
2728 {
2729         struct i40e_hw *hw = &pf->hw;
2730         u32 val;
2731
2732         val = I40E_PFINT_DYN_CTL0_INTENA_MASK   |
2733               I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2734               (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2735
2736         wr32(hw, I40E_PFINT_DYN_CTL0, val);
2737         i40e_flush(hw);
2738 }
2739
2740 /**
2741  * i40e_irq_dynamic_enable - Enable default interrupt generation settings
2742  * @vsi: pointer to a vsi
2743  * @vector: enable a particular Hw Interrupt vector
2744  **/
2745 void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
2746 {
2747         struct i40e_pf *pf = vsi->back;
2748         struct i40e_hw *hw = &pf->hw;
2749         u32 val;
2750
2751         val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2752               I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2753               (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2754         wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
2755         /* skip the flush */
2756 }
2757
2758 /**
2759  * i40e_msix_clean_rings - MSIX mode Interrupt Handler
2760  * @irq: interrupt number
2761  * @data: pointer to a q_vector
2762  **/
2763 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
2764 {
2765         struct i40e_q_vector *q_vector = data;
2766
2767         if (!q_vector->tx.ring && !q_vector->rx.ring)
2768                 return IRQ_HANDLED;
2769
2770         napi_schedule(&q_vector->napi);
2771
2772         return IRQ_HANDLED;
2773 }
2774
2775 /**
2776  * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
2777  * @vsi: the VSI being configured
2778  * @basename: name for the vector
2779  *
2780  * Allocates MSI-X vectors and requests interrupts from the kernel.
2781  **/
2782 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
2783 {
2784         int q_vectors = vsi->num_q_vectors;
2785         struct i40e_pf *pf = vsi->back;
2786         int base = vsi->base_vector;
2787         int rx_int_idx = 0;
2788         int tx_int_idx = 0;
2789         int vector, err;
2790
2791         for (vector = 0; vector < q_vectors; vector++) {
2792                 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
2793
2794                 if (q_vector->tx.ring && q_vector->rx.ring) {
2795                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2796                                  "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2797                         tx_int_idx++;
2798                 } else if (q_vector->rx.ring) {
2799                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2800                                  "%s-%s-%d", basename, "rx", rx_int_idx++);
2801                 } else if (q_vector->tx.ring) {
2802                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2803                                  "%s-%s-%d", basename, "tx", tx_int_idx++);
2804                 } else {
2805                         /* skip this unused q_vector */
2806                         continue;
2807                 }
2808                 err = request_irq(pf->msix_entries[base + vector].vector,
2809                                   vsi->irq_handler,
2810                                   0,
2811                                   q_vector->name,
2812                                   q_vector);
2813                 if (err) {
2814                         dev_info(&pf->pdev->dev,
2815                                  "%s: request_irq failed, error: %d\n",
2816                                  __func__, err);
2817                         goto free_queue_irqs;
2818                 }
2819                 /* assign the mask for this irq */
2820                 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
2821                                       &q_vector->affinity_mask);
2822         }
2823
2824         vsi->irqs_ready = true;
2825         return 0;
2826
2827 free_queue_irqs:
2828         while (vector) {
2829                 vector--;
2830                 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
2831                                       NULL);
2832                 free_irq(pf->msix_entries[base + vector].vector,
2833                          &(vsi->q_vectors[vector]));
2834         }
2835         return err;
2836 }
2837
/**
 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
 * @vsi: the VSI being un-configured
 *
 * Zeroes every queue interrupt-cause register owned by the VSI,
 * disables the corresponding dynamic-control registers, and waits for
 * any in-flight handlers to complete.
 **/
static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	int i;

	/* disable each queue pair's Tx and Rx interrupt causes */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
		wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
	}

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		/* PFINT_DYN_CTLN indexing is offset by one from the
		 * vector number (see i40e_vsi_configure_msix)
		 */
		for (i = vsi->base_vector;
		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
			wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);

		i40e_flush(hw);
		/* wait until no handler is still running on any vector */
		for (i = 0; i < vsi->num_q_vectors; i++)
			synchronize_irq(pf->msix_entries[i + base].vector);
	} else {
		/* Legacy and MSI mode - this stops all interrupt handling */
		wr32(hw, I40E_PFINT_ICR0_ENA, 0);
		wr32(hw, I40E_PFINT_DYN_CTL0, 0);
		i40e_flush(hw);
		synchronize_irq(pf->pdev->irq);
	}
}
2870
2871 /**
2872  * i40e_vsi_enable_irq - Enable IRQ for the given VSI
2873  * @vsi: the VSI being configured
2874  **/
2875 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
2876 {
2877         struct i40e_pf *pf = vsi->back;
2878         int i;
2879
2880         if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
2881                 for (i = vsi->base_vector;
2882                      i < (vsi->num_q_vectors + vsi->base_vector); i++)
2883                         i40e_irq_dynamic_enable(vsi, i);
2884         } else {
2885                 i40e_irq_dynamic_enable_icr0(pf);
2886         }
2887
2888         i40e_flush(&pf->hw);
2889         return 0;
2890 }
2891
2892 /**
2893  * i40e_stop_misc_vector - Stop the vector that handles non-queue events
2894  * @pf: board private structure
2895  **/
2896 static void i40e_stop_misc_vector(struct i40e_pf *pf)
2897 {
2898         /* Disable ICR 0 */
2899         wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
2900         i40e_flush(&pf->hw);
2901 }
2902
/**
 * i40e_intr - MSI/Legacy and non-queue interrupt handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 *
 * This is the handler used for all MSI/Legacy interrupts, and deals
 * with both queue and non-queue interrupts.  This is also used in
 * MSIX mode to handle the non-queue interrupts.
 *
 * Returns IRQ_HANDLED when a cause was serviced, IRQ_NONE when the
 * (possibly shared) line fired with nothing pending.
 **/
static irqreturn_t i40e_intr(int irq, void *data)
{
	struct i40e_pf *pf = (struct i40e_pf *)data;
	struct i40e_hw *hw = &pf->hw;
	irqreturn_t ret = IRQ_NONE;
	u32 icr0, icr0_remaining;
	u32 val, ena_mask;

	icr0 = rd32(hw, I40E_PFINT_ICR0);
	ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* if sharing a legacy IRQ, we might get called w/o an intr pending */
	if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
		goto enable_intr;

	/* if interrupt but no bits showing, must be SWINT */
	if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
	    (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
		pf->sw_int_count++;

	/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {

		/* temporarily disable queue cause for NAPI processing */
		u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
		qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
		wr32(hw, I40E_QINT_RQCTL(0), qval);

		qval = rd32(hw, I40E_QINT_TQCTL(0));
		qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
		wr32(hw, I40E_QINT_TQCTL(0), qval);

		if (!test_bit(__I40E_DOWN, &pf->state))
			napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
	}

	/* AdminQ event: flag it for the service task */
	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
		set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
	}

	/* malicious driver detection event */
	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
		set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
	}

	/* VF level reset (VFLR) event */
	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
		set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
	}

	/* global reset: note it and record which flavor of reset it was */
	if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
		if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
			set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
		ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
		val = rd32(hw, I40E_GLGEN_RSTAT);
		val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
		       >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
		if (val == I40E_RESET_CORER) {
			pf->corer_count++;
		} else if (val == I40E_RESET_GLOBR) {
			pf->globr_count++;
		} else if (val == I40E_RESET_EMPR) {
			pf->empr_count++;
			set_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
		}
	}

	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
		icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
		dev_info(&pf->pdev->dev, "HMC error interrupt\n");
	}

	/* PTP Tx timestamp is ready to be collected */
	if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
		u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);

		if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
			/* NOTE(review): the ENA-flavored mask is cleared
			 * from icr0 here; the bit positions appear to
			 * match the plain TIMESYNC mask - confirm
			 */
			icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
			i40e_ptp_tx_hwtstamp(pf);
		}
	}

	/* If a critical error is pending we have no choice but to reset the
	 * device.
	 * Report and mask out any remaining unexpected interrupts.
	 */
	icr0_remaining = icr0 & ena_mask;
	if (icr0_remaining) {
		dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
			 icr0_remaining);
		if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
		    (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
		    (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
			dev_info(&pf->pdev->dev, "device will be reset\n");
			set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
			i40e_service_event_schedule(pf);
		}
		ena_mask &= ~icr0_remaining;
	}
	ret = IRQ_HANDLED;

enable_intr:
	/* re-enable interrupt causes */
	wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
	if (!test_bit(__I40E_DOWN, &pf->state)) {
		i40e_service_event_schedule(pf);
		i40e_irq_dynamic_enable_icr0(pf);
	}

	return ret;
}
3023
/**
 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
 * @tx_ring:  tx ring to clean
 * @budget:   how many cleans we're allowed
 *
 * Walks the Flow Director Tx ring, unmapping buffers for descriptors
 * the hardware has marked done.
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
{
	struct i40e_vsi *vsi = tx_ring->vsi;
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_desc;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	/* bias the index below zero (u16 wraps) so that i reaching 0
	 * marks the wrap point of the ring
	 */
	i -= tx_ring->count;

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		dma_unmap_len_set(tx_buf, len, 0);


		/* move to the next desc and buffer to clean */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			/* wrapped: restart from the head of the ring */
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	/* remove the bias to recover the true ring index */
	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	/* re-arm this queue's interrupt now that cleaning is done */
	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		i40e_irq_dynamic_enable(vsi,
				tx_ring->q_vector->v_idx + vsi->base_vector);
	}
	return budget > 0;
}
3092
3093 /**
3094  * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
3095  * @irq: interrupt number
3096  * @data: pointer to a q_vector
3097  **/
3098 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
3099 {
3100         struct i40e_q_vector *q_vector = data;
3101         struct i40e_vsi *vsi;
3102
3103         if (!q_vector->tx.ring)
3104                 return IRQ_HANDLED;
3105
3106         vsi = q_vector->tx.ring->vsi;
3107         i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
3108
3109         return IRQ_HANDLED;
3110 }
3111
3112 /**
3113  * i40e_map_vector_to_qp - Assigns the queue pair to the vector
3114  * @vsi: the VSI being configured
3115  * @v_idx: vector index
3116  * @qp_idx: queue pair index
3117  **/
3118 static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
3119 {
3120         struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3121         struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
3122         struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
3123
3124         tx_ring->q_vector = q_vector;
3125         tx_ring->next = q_vector->tx.ring;
3126         q_vector->tx.ring = tx_ring;
3127         q_vector->tx.count++;
3128
3129         rx_ring->q_vector = q_vector;
3130         rx_ring->next = q_vector->rx.ring;
3131         q_vector->rx.ring = rx_ring;
3132         q_vector->rx.count++;
3133 }
3134
3135 /**
3136  * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
3137  * @vsi: the VSI being configured
3138  *
3139  * This function maps descriptor rings to the queue-specific vectors
3140  * we were allotted through the MSI-X enabling code.  Ideally, we'd have
3141  * one vector per queue pair, but on a constrained vector budget, we
3142  * group the queue pairs as "efficiently" as possible.
3143  **/
3144 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
3145 {
3146         int qp_remaining = vsi->num_queue_pairs;
3147         int q_vectors = vsi->num_q_vectors;
3148         int num_ringpairs;
3149         int v_start = 0;
3150         int qp_idx = 0;
3151
3152         /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
3153          * group them so there are multiple queues per vector.
3154          */
3155         for (; v_start < q_vectors && qp_remaining; v_start++) {
3156                 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
3157
3158                 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
3159
3160                 q_vector->num_ringpairs = num_ringpairs;
3161
3162                 q_vector->rx.count = 0;
3163                 q_vector->tx.count = 0;
3164                 q_vector->rx.ring = NULL;
3165                 q_vector->tx.ring = NULL;
3166
3167                 while (num_ringpairs--) {
3168                         map_vector_to_qp(vsi, v_start, qp_idx);
3169                         qp_idx++;
3170                         qp_remaining--;
3171                 }
3172         }
3173 }
3174
3175 /**
3176  * i40e_vsi_request_irq - Request IRQ from the OS
3177  * @vsi: the VSI being configured
3178  * @basename: name for the vector
3179  **/
3180 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
3181 {
3182         struct i40e_pf *pf = vsi->back;
3183         int err;
3184
3185         if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3186                 err = i40e_vsi_request_irq_msix(vsi, basename);
3187         else if (pf->flags & I40E_FLAG_MSI_ENABLED)
3188                 err = request_irq(pf->pdev->irq, i40e_intr, 0,
3189                                   pf->misc_int_name, pf);
3190         else
3191                 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
3192                                   pf->misc_int_name, pf);
3193
3194         if (err)
3195                 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
3196
3197         return err;
3198 }
3199
3200 #ifdef CONFIG_NET_POLL_CONTROLLER
3201 /**
3202  * i40e_netpoll - A Polling 'interrupt'handler
3203  * @netdev: network interface device structure
3204  *
3205  * This is used by netconsole to send skbs without having to re-enable
3206  * interrupts.  It's not called while the normal interrupt routine is executing.
3207  **/
3208 static void i40e_netpoll(struct net_device *netdev)
3209 {
3210         struct i40e_netdev_priv *np = netdev_priv(netdev);
3211         struct i40e_vsi *vsi = np->vsi;
3212         struct i40e_pf *pf = vsi->back;
3213         int i;
3214
3215         /* if interface is down do nothing */
3216         if (test_bit(__I40E_DOWN, &vsi->state))
3217                 return;
3218
3219         pf->flags |= I40E_FLAG_IN_NETPOLL;
3220         if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3221                 for (i = 0; i < vsi->num_q_vectors; i++)
3222                         i40e_msix_clean_rings(0, vsi->q_vectors[i]);
3223         } else {
3224                 i40e_intr(pf->pdev->irq, netdev);
3225         }
3226         pf->flags &= ~I40E_FLAG_IN_NETPOLL;
3227 }
3228 #endif
3229
/**
 * i40e_vsi_control_tx - Start or stop a VSI's rings
 * @vsi: the VSI being configured
 * @enable: start or stop the rings
 *
 * Returns 0 on success, -ETIMEDOUT if any queue fails to reach the
 * requested state within the polling window.
 **/
static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int i, j, pf_q;
	u32 tx_reg;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {

		/* warn the TX unit of coming changes */
		i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
		if (!enable)
			udelay(10);

		/* wait until the REQ and STAT bits agree, so we don't
		 * sample a transition that is still in flight
		 */
		for (j = 0; j < 50; j++) {
			tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
			if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
			    ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
				break;
			usleep_range(1000, 2000);
		}
		/* Skip if the queue is already in the requested state */
		if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
			continue;

		/* turn on/off the queue */
		if (enable) {
			/* reset the head pointer before (re)enabling */
			wr32(hw, I40E_QTX_HEAD(pf_q), 0);
			tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
		} else {
			tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
		}

		wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);

		/* wait for the change to finish */
		for (j = 0; j < 10; j++) {
			tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
			if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
				break;

			udelay(10);
		}
		if (j >= 10) {
			dev_info(&pf->pdev->dev, "Tx ring %d %sable timeout\n",
				 pf_q, (enable ? "en" : "dis"));
			return -ETIMEDOUT;
		}
	}

	/* extra settle delay on revision 0 hardware — presumably an
	 * early-silicon quirk; TODO confirm against the errata sheet
	 */
	if (hw->revision_id == 0)
		mdelay(50);

	return 0;
}
3291
/**
 * i40e_vsi_control_rx - Start or stop a VSI's rings
 * @vsi: the VSI being configured
 * @enable: start or stop the rings
 *
 * Returns 0 on success, -ETIMEDOUT if any queue fails to reach the
 * requested state within the polling window.
 **/
static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int i, j, pf_q;
	u32 rx_reg;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		/* wait until the REQ and STAT bits agree, so we don't
		 * sample a transition that is still in flight
		 */
		for (j = 0; j < 50; j++) {
			rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
			if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
			    ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
				break;
			usleep_range(1000, 2000);
		}

		/* Skip if the queue is already in the requested state */
		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
			continue;

		/* turn on/off the queue */
		if (enable)
			rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
		else
			rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
		wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);

		/* wait for the change to finish */
		for (j = 0; j < 10; j++) {
			rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));

			if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
				break;

			udelay(10);
		}
		if (j >= 10) {
			dev_info(&pf->pdev->dev, "Rx ring %d %sable timeout\n",
				 pf_q, (enable ? "en" : "dis"));
			return -ETIMEDOUT;
		}
	}

	return 0;
}
3343
3344 /**
3345  * i40e_vsi_control_rings - Start or stop a VSI's rings
3346  * @vsi: the VSI being configured
3347  * @enable: start or stop the rings
3348  **/
3349 int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
3350 {
3351         int ret = 0;
3352
3353         /* do rx first for enable and last for disable */
3354         if (request) {
3355                 ret = i40e_vsi_control_rx(vsi, request);
3356                 if (ret)
3357                         return ret;
3358                 ret = i40e_vsi_control_tx(vsi, request);
3359         } else {
3360                 /* Ignore return value, we need to shutdown whatever we can */
3361                 i40e_vsi_control_tx(vsi, request);
3362                 i40e_vsi_control_rx(vsi, request);
3363         }
3364
3365         return ret;
3366 }
3367
/**
 * i40e_vsi_free_irq - Free the irq association with the OS
 * @vsi: the VSI being configured
 *
 * Releases the VSI's OS-level interrupts (per-vector for MSI-X, the
 * single PCI irq otherwise) and walks the hardware interrupt-to-queue
 * linked lists, detaching every queue so nothing stays associated with
 * a freed vector.
 **/
static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	u32 val, qp;
	int i;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		if (!vsi->q_vectors)
			return;

		/* nothing to free if the irqs were never requested */
		if (!vsi->irqs_ready)
			return;

		vsi->irqs_ready = false;
		for (i = 0; i < vsi->num_q_vectors; i++) {
			u16 vector = i + base;

			/* free only the irqs that were actually requested */
			if (!vsi->q_vectors[i] ||
			    !vsi->q_vectors[i]->num_ringpairs)
				continue;

			/* clear the affinity_mask in the IRQ descriptor */
			irq_set_affinity_hint(pf->msix_entries[vector].vector,
					      NULL);
			free_irq(pf->msix_entries[vector].vector,
				 vsi->q_vectors[i]);

			/* Tear down the interrupt queue link list
			 *
			 * We know that they come in pairs and always
			 * the Rx first, then the Tx.  To clear the
			 * link list, stick the EOL value into the
			 * next_q field of the registers.
			 */
			/* vector 0 uses PFINT_LNKLST0 (legacy branch below),
			 * so LNKLSTN is indexed from vector 1 — hence -1
			 */
			val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
			qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
				>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
			val |= I40E_QUEUE_END_OF_LIST
				<< I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
			wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);

			/* follow the chain of queues, detaching each one
			 * from the vector and terminating its next-queue
			 * link, Rx control register first, then Tx
			 */
			while (qp != I40E_QUEUE_END_OF_LIST) {
				u32 next;

				val = rd32(hw, I40E_QINT_RQCTL(qp));

				val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
					 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
					 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
					 I40E_QINT_RQCTL_INTEVENT_MASK);

				val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
					 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);

				wr32(hw, I40E_QINT_RQCTL(qp), val);

				val = rd32(hw, I40E_QINT_TQCTL(qp));

				/* the Tx control register holds the link to
				 * the next queue in the chain
				 */
				next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
					>> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;

				val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
					 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
					 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
					 I40E_QINT_TQCTL_INTEVENT_MASK);

				val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
					 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);

				wr32(hw, I40E_QINT_TQCTL(qp), val);
				qp = next;
			}
		}
	} else {
		/* MSI / legacy: a single irq owned by the PF */
		free_irq(pf->pdev->irq, pf);

		/* NOTE(review): the LNKLSTN FIRSTQ mask/shift are used on
		 * LNKLST0 here (only the set uses the LNKLST0 shift) —
		 * presumably the field layouts are identical; verify
		 * against the register definitions
		 */
		val = rd32(hw, I40E_PFINT_LNKLST0);
		qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
			>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
		val |= I40E_QUEUE_END_OF_LIST
			<< I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
		wr32(hw, I40E_PFINT_LNKLST0, val);

		/* detach the single queue pair: Rx first, then Tx */
		val = rd32(hw, I40E_QINT_RQCTL(qp));
		val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
			 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
			 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
			 I40E_QINT_RQCTL_INTEVENT_MASK);

		val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
			I40E_QINT_RQCTL_NEXTQ_INDX_MASK);

		wr32(hw, I40E_QINT_RQCTL(qp), val);

		val = rd32(hw, I40E_QINT_TQCTL(qp));

		val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
			 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
			 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
			 I40E_QINT_TQCTL_INTEVENT_MASK);

		val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
			I40E_QINT_TQCTL_NEXTQ_INDX_MASK);

		wr32(hw, I40E_QINT_TQCTL(qp), val);
	}
}
3482
3483 /**
3484  * i40e_free_q_vector - Free memory allocated for specific interrupt vector
3485  * @vsi: the VSI being configured
3486  * @v_idx: Index of vector to be freed
3487  *
3488  * This function frees the memory allocated to the q_vector.  In addition if
3489  * NAPI is enabled it will delete any references to the NAPI struct prior
3490  * to freeing the q_vector.
3491  **/
3492 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
3493 {
3494         struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3495         struct i40e_ring *ring;
3496
3497         if (!q_vector)
3498                 return;
3499
3500         /* disassociate q_vector from rings */
3501         i40e_for_each_ring(ring, q_vector->tx)
3502                 ring->q_vector = NULL;
3503
3504         i40e_for_each_ring(ring, q_vector->rx)
3505                 ring->q_vector = NULL;
3506
3507         /* only VSI w/ an associated netdev is set up w/ NAPI */
3508         if (vsi->netdev)
3509                 netif_napi_del(&q_vector->napi);
3510
3511         vsi->q_vectors[v_idx] = NULL;
3512
3513         kfree_rcu(q_vector, rcu);
3514 }
3515
3516 /**
3517  * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
3518  * @vsi: the VSI being un-configured
3519  *
3520  * This frees the memory allocated to the q_vectors and
3521  * deletes references to the NAPI struct.
3522  **/
3523 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
3524 {
3525         int v_idx;
3526
3527         for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
3528                 i40e_free_q_vector(vsi, v_idx);
3529 }
3530
3531 /**
3532  * i40e_reset_interrupt_capability - Disable interrupt setup in OS
3533  * @pf: board private structure
3534  **/
3535 static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
3536 {
3537         /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
3538         if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3539                 pci_disable_msix(pf->pdev);
3540                 kfree(pf->msix_entries);
3541                 pf->msix_entries = NULL;
3542         } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
3543                 pci_disable_msi(pf->pdev);
3544         }
3545         pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
3546 }
3547
3548 /**
3549  * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
3550  * @pf: board private structure
3551  *
3552  * We go through and clear interrupt specific resources and reset the structure
3553  * to pre-load conditions
3554  **/
3555 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
3556 {
3557         int i;
3558
3559         i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
3560         for (i = 0; i < pf->num_alloc_vsi; i++)
3561                 if (pf->vsi[i])
3562                         i40e_vsi_free_q_vectors(pf->vsi[i]);
3563         i40e_reset_interrupt_capability(pf);
3564 }
3565
3566 /**
3567  * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
3568  * @vsi: the VSI being configured
3569  **/
3570 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
3571 {
3572         int q_idx;
3573
3574         if (!vsi->netdev)
3575                 return;
3576
3577         for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3578                 napi_enable(&vsi->q_vectors[q_idx]->napi);
3579 }
3580
3581 /**
3582  * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
3583  * @vsi: the VSI being configured
3584  **/
3585 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
3586 {
3587         int q_idx;
3588
3589         if (!vsi->netdev)
3590                 return;
3591
3592         for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3593                 napi_disable(&vsi->q_vectors[q_idx]->napi);
3594 }
3595
3596 /**
3597  * i40e_vsi_close - Shut down a VSI
3598  * @vsi: the vsi to be quelled
3599  **/
3600 static void i40e_vsi_close(struct i40e_vsi *vsi)
3601 {
3602         if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
3603                 i40e_down(vsi);
3604         i40e_vsi_free_irq(vsi);
3605         i40e_vsi_free_tx_resources(vsi);
3606         i40e_vsi_free_rx_resources(vsi);
3607 }
3608
3609 /**
3610  * i40e_quiesce_vsi - Pause a given VSI
3611  * @vsi: the VSI being paused
3612  **/
3613 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
3614 {
3615         if (test_bit(__I40E_DOWN, &vsi->state))
3616                 return;
3617
3618         set_bit(__I40E_NEEDS_RESTART, &vsi->state);
3619         if (vsi->netdev && netif_running(vsi->netdev)) {
3620                 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
3621         } else {
3622                 i40e_vsi_close(vsi);
3623         }
3624 }
3625
3626 /**
3627  * i40e_unquiesce_vsi - Resume a given VSI
3628  * @vsi: the VSI being resumed
3629  **/
3630 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
3631 {
3632         if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
3633                 return;
3634
3635         clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
3636         if (vsi->netdev && netif_running(vsi->netdev))
3637                 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
3638         else
3639                 i40e_vsi_open(vsi);   /* this clears the DOWN bit */
3640 }
3641
3642 /**
3643  * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
3644  * @pf: the PF
3645  **/
3646 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
3647 {
3648         int v;
3649
3650         for (v = 0; v < pf->num_alloc_vsi; v++) {
3651                 if (pf->vsi[v])
3652                         i40e_quiesce_vsi(pf->vsi[v]);
3653         }
3654 }
3655
3656 /**
3657  * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
3658  * @pf: the PF
3659  **/
3660 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
3661 {
3662         int v;
3663
3664         for (v = 0; v < pf->num_alloc_vsi; v++) {
3665                 if (pf->vsi[v])
3666                         i40e_unquiesce_vsi(pf->vsi[v]);
3667         }
3668 }
3669
3670 /**
3671  * i40e_dcb_get_num_tc -  Get the number of TCs from DCBx config
3672  * @dcbcfg: the corresponding DCBx configuration structure
3673  *
3674  * Return the number of TCs from given DCBx configuration
3675  **/
3676 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
3677 {
3678         u8 num_tc = 0;
3679         int i;
3680
3681         /* Scan the ETS Config Priority Table to find
3682          * traffic class enabled for a given priority
3683          * and use the traffic class index to get the
3684          * number of traffic classes enabled
3685          */
3686         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
3687                 if (dcbcfg->etscfg.prioritytable[i] > num_tc)
3688                         num_tc = dcbcfg->etscfg.prioritytable[i];
3689         }
3690
3691         /* Traffic class index starts from zero so
3692          * increment to return the actual count
3693          */
3694         return num_tc + 1;
3695 }
3696
3697 /**
3698  * i40e_dcb_get_enabled_tc - Get enabled traffic classes
3699  * @dcbcfg: the corresponding DCBx configuration structure
3700  *
3701  * Query the current DCB configuration and return the number of
3702  * traffic classes enabled from the given DCBX config
3703  **/
3704 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
3705 {
3706         u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
3707         u8 enabled_tc = 1;
3708         u8 i;
3709
3710         for (i = 0; i < num_tc; i++)
3711                 enabled_tc |= 1 << i;
3712
3713         return enabled_tc;
3714 }
3715
3716 /**
3717  * i40e_pf_get_num_tc - Get enabled traffic classes for PF
3718  * @pf: PF being queried
3719  *
3720  * Return number of traffic classes enabled for the given PF
3721  **/
3722 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
3723 {
3724         struct i40e_hw *hw = &pf->hw;
3725         u8 i, enabled_tc;
3726         u8 num_tc = 0;
3727         struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
3728
3729         /* If DCB is not enabled then always in single TC */
3730         if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
3731                 return 1;
3732
3733         /* MFP mode return count of enabled TCs for this PF */
3734         if (pf->flags & I40E_FLAG_MFP_ENABLED) {
3735                 enabled_tc = pf->hw.func_caps.enabled_tcmap;
3736                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3737                         if (enabled_tc & (1 << i))
3738                                 num_tc++;
3739                 }
3740                 return num_tc;
3741         }
3742
3743         /* SFP mode will be enabled for all TCs on port */
3744         return i40e_dcb_get_num_tc(dcbcfg);
3745 }
3746
3747 /**
3748  * i40e_pf_get_default_tc - Get bitmap for first enabled TC
3749  * @pf: PF being queried
3750  *
3751  * Return a bitmap for first enabled traffic class for this PF.
3752  **/
3753 static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
3754 {
3755         u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
3756         u8 i = 0;
3757
3758         if (!enabled_tc)
3759                 return 0x1; /* TC0 */
3760
3761         /* Find the first enabled TC */
3762         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3763                 if (enabled_tc & (1 << i))
3764                         break;
3765         }
3766
3767         return 1 << i;
3768 }
3769
3770 /**
3771  * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
3772  * @pf: PF being queried
3773  *
3774  * Return a bitmap for enabled traffic classes for this PF.
3775  **/
3776 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
3777 {
3778         /* If DCB is not enabled for this PF then just return default TC */
3779         if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
3780                 return i40e_pf_get_default_tc(pf);
3781
3782         /* MFP mode will have enabled TCs set by FW */
3783         if (pf->flags & I40E_FLAG_MFP_ENABLED)
3784                 return pf->hw.func_caps.enabled_tcmap;
3785
3786         /* SFP mode we want PF to be enabled for all TCs */
3787         return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
3788 }
3789
/**
 * i40e_vsi_get_bw_info - Query VSI BW Information
 * @vsi: the VSI being queried
 *
 * Queries the firmware (via the admin queue) for the VSI-level and
 * per-TC bandwidth configuration and caches the results on @vsi.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
{
	struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	i40e_status aq_ret;
	u32 tc_bw_max;
	int i;

	/* Get the VSI level BW configuration */
	aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get pf vsi bw config, err %d, aq_err %d\n",
			 aq_ret, pf->hw.aq.asq_last_status);
		return -EINVAL;
	}

	/* Get the VSI level BW configuration per TC */
	aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
						  NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
			 aq_ret, pf->hw.aq.asq_last_status);
		return -EINVAL;
	}

	/* the two queries should agree on which TCs are valid; a
	 * mismatch is logged but deliberately not treated as fatal
	 */
	if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
		dev_info(&pf->pdev->dev,
			 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
			 bw_config.tc_valid_bits,
			 bw_ets_config.tc_valid_bits);
		/* Still continuing */
	}

	vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
	vsi->bw_max_quanta = bw_config.max_bw;
	/* reassemble the 32-bit max-quanta field from its two LE16 halves */
	tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
		    (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
		vsi->bw_ets_limit_credits[i] =
					le16_to_cpu(bw_ets_config.credits[i]);
		/* 3 bits out of 4 for each TC */
		vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
	}

	return 0;
}
3847
3848 /**
3849  * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
3850  * @vsi: the VSI being configured
3851  * @enabled_tc: TC bitmap
3852  * @bw_credits: BW shared credits per TC
3853  *
3854  * Returns 0 on success, negative value on failure
3855  **/
3856 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
3857                                        u8 *bw_share)
3858 {
3859         struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
3860         i40e_status aq_ret;
3861         int i;
3862
3863         bw_data.tc_valid_bits = enabled_tc;
3864         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3865                 bw_data.tc_bw_credits[i] = bw_share[i];
3866
3867         aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
3868                                           NULL);
3869         if (aq_ret) {
3870                 dev_info(&vsi->back->pdev->dev,
3871                          "AQ command Config VSI BW allocation per TC failed = %d\n",
3872                          vsi->back->hw.aq.asq_last_status);
3873                 return -EINVAL;
3874         }
3875
3876         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3877                 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
3878
3879         return 0;
3880 }
3881
3882 /**
3883  * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
3884  * @vsi: the VSI being configured
3885  * @enabled_tc: TC map to be enabled
3886  *
3887  **/
3888 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
3889 {
3890         struct net_device *netdev = vsi->netdev;
3891         struct i40e_pf *pf = vsi->back;
3892         struct i40e_hw *hw = &pf->hw;
3893         u8 netdev_tc = 0;
3894         int i;
3895         struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
3896
3897         if (!netdev)
3898                 return;
3899
3900         if (!enabled_tc) {
3901                 netdev_reset_tc(netdev);
3902                 return;
3903         }
3904
3905         /* Set up actual enabled TCs on the VSI */
3906         if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
3907                 return;
3908
3909         /* set per TC queues for the VSI */
3910         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3911                 /* Only set TC queues for enabled tcs
3912                  *
3913                  * e.g. For a VSI that has TC0 and TC3 enabled the
3914                  * enabled_tc bitmap would be 0x00001001; the driver
3915                  * will set the numtc for netdev as 2 that will be
3916                  * referenced by the netdev layer as TC 0 and 1.
3917                  */
3918                 if (vsi->tc_config.enabled_tc & (1 << i))
3919                         netdev_set_tc_queue(netdev,
3920                                         vsi->tc_config.tc_info[i].netdev_tc,
3921                                         vsi->tc_config.tc_info[i].qcount,
3922                                         vsi->tc_config.tc_info[i].qoffset);
3923         }
3924
3925         /* Assign UP2TC map for the VSI */
3926         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
3927                 /* Get the actual TC# for the UP */
3928                 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
3929                 /* Get the mapped netdev TC# for the UP */
3930                 netdev_tc =  vsi->tc_config.tc_info[ets_tc].netdev_tc;
3931                 netdev_set_prio_tc_map(netdev, i, netdev_tc);
3932         }
3933 }
3934
3935 /**
3936  * i40e_vsi_update_queue_map - Update our copy of VSi info with new queue map
3937  * @vsi: the VSI being configured
3938  * @ctxt: the ctxt buffer returned from AQ VSI update param command
3939  **/
3940 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
3941                                       struct i40e_vsi_context *ctxt)
3942 {
3943         /* copy just the sections touched not the entire info
3944          * since not all sections are valid as returned by
3945          * update vsi params
3946          */
3947         vsi->info.mapping_flags = ctxt->info.mapping_flags;
3948         memcpy(&vsi->info.queue_mapping,
3949                &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
3950         memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
3951                sizeof(vsi->info.tc_mapping));
3952 }
3953
3954 /**
3955  * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
3956  * @vsi: VSI to be configured
3957  * @enabled_tc: TC bitmap
3958  *
3959  * This configures a particular VSI for TCs that are mapped to the
3960  * given TC bitmap. It uses default bandwidth share for TCs across
3961  * VSIs to configure TC for a particular VSI.
3962  *
3963  * NOTE:
3964  * It is expected that the VSI queues have been quisced before calling
3965  * this function.
3966  **/
3967 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
3968 {
3969         u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
3970         struct i40e_vsi_context ctxt;
3971         int ret = 0;
3972         int i;
3973
3974         /* Check if enabled_tc is same as existing or new TCs */
3975         if (vsi->tc_config.enabled_tc == enabled_tc)
3976                 return ret;
3977
3978         /* Enable ETS TCs with equal BW Share for now across all VSIs */
3979         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3980                 if (enabled_tc & (1 << i))
3981                         bw_share[i] = 1;
3982         }
3983
3984         ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
3985         if (ret) {
3986                 dev_info(&vsi->back->pdev->dev,
3987                          "Failed configuring TC map %d for VSI %d\n",
3988                          enabled_tc, vsi->seid);
3989                 goto out;
3990         }
3991
3992         /* Update Queue Pairs Mapping for currently enabled UPs */
3993         ctxt.seid = vsi->seid;
3994         ctxt.pf_num = vsi->back->hw.pf_id;
3995         ctxt.vf_num = 0;
3996         ctxt.uplink_seid = vsi->uplink_seid;
3997         memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
3998         i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
3999
4000         /* Update the VSI after updating the VSI queue-mapping information */
4001         ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
4002         if (ret) {
4003                 dev_info(&vsi->back->pdev->dev,
4004                          "update vsi failed, aq_err=%d\n",
4005                          vsi->back->hw.aq.asq_last_status);
4006                 goto out;
4007         }
4008         /* update the local VSI info with updated queue map */
4009         i40e_vsi_update_queue_map(vsi, &ctxt);
4010         vsi->info.valid_sections = 0;
4011
4012         /* Update current VSI BW information */
4013         ret = i40e_vsi_get_bw_info(vsi);
4014         if (ret) {
4015                 dev_info(&vsi->back->pdev->dev,
4016                          "Failed updating vsi bw info, aq_err=%d\n",
4017                          vsi->back->hw.aq.asq_last_status);
4018                 goto out;
4019         }
4020
4021         /* Update the netdev TC setup */
4022         i40e_vsi_config_netdev_tc(vsi, enabled_tc);
4023 out:
4024         return ret;
4025 }
4026
4027 /**
4028  * i40e_veb_config_tc - Configure TCs for given VEB
4029  * @veb: given VEB
4030  * @enabled_tc: TC bitmap
4031  *
4032  * Configures given TC bitmap for VEB (switching) element
4033  **/
4034 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
4035 {
4036         struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
4037         struct i40e_pf *pf = veb->pf;
4038         int ret = 0;
4039         int i;
4040
4041         /* No TCs or already enabled TCs just return */
4042         if (!enabled_tc || veb->enabled_tc == enabled_tc)
4043                 return ret;
4044
4045         bw_data.tc_valid_bits = enabled_tc;
4046         /* bw_data.absolute_credits is not set (relative) */
4047
4048         /* Enable ETS TCs with equal BW Share for now */
4049         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4050                 if (enabled_tc & (1 << i))
4051                         bw_data.tc_bw_share_credits[i] = 1;
4052         }
4053
4054         ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
4055                                                    &bw_data, NULL);
4056         if (ret) {
4057                 dev_info(&pf->pdev->dev,
4058                          "veb bw config failed, aq_err=%d\n",
4059                          pf->hw.aq.asq_last_status);
4060                 goto out;
4061         }
4062
4063         /* Update the BW information */
4064         ret = i40e_veb_get_bw_info(veb);
4065         if (ret) {
4066                 dev_info(&pf->pdev->dev,
4067                          "Failed getting veb bw config, aq_err=%d\n",
4068                          pf->hw.aq.asq_last_status);
4069         }
4070
4071 out:
4072         return ret;
4073 }
4074
4075 #ifdef CONFIG_I40E_DCB
4076 /**
4077  * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
4078  * @pf: PF struct
4079  *
4080  * Reconfigure VEB/VSIs on a given PF; it is assumed that
4081  * the caller would've quiesce all the VSIs before calling
4082  * this function
4083  **/
4084 static void i40e_dcb_reconfigure(struct i40e_pf *pf)
4085 {
4086         u8 tc_map = 0;
4087         int ret;
4088         u8 v;
4089
4090         /* Enable the TCs available on PF to all VEBs */
4091         tc_map = i40e_pf_get_tc_map(pf);
4092         for (v = 0; v < I40E_MAX_VEB; v++) {
4093                 if (!pf->veb[v])
4094                         continue;
4095                 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
4096                 if (ret) {
4097                         dev_info(&pf->pdev->dev,
4098                                  "Failed configuring TC for VEB seid=%d\n",
4099                                  pf->veb[v]->seid);
4100                         /* Will try to configure as many components */
4101                 }
4102         }
4103
4104         /* Update each VSI */
4105         for (v = 0; v < pf->num_alloc_vsi; v++) {
4106                 if (!pf->vsi[v])
4107                         continue;
4108
4109                 /* - Enable all TCs for the LAN VSI
4110                  * - For all others keep them at TC0 for now
4111                  */
4112                 if (v == pf->lan_vsi)
4113                         tc_map = i40e_pf_get_tc_map(pf);
4114                 else
4115                         tc_map = i40e_pf_get_default_tc(pf);
4116
4117                 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
4118                 if (ret) {
4119                         dev_info(&pf->pdev->dev,
4120                                  "Failed configuring TC for VSI seid=%d\n",
4121                                  pf->vsi[v]->seid);
4122                         /* Will try to configure as many components */
4123                 } else {
4124                         /* Re-configure VSI vectors based on updated TC map */
4125                         i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
4126                         if (pf->vsi[v]->netdev)
4127                                 i40e_dcbnl_set_all(pf->vsi[v]);
4128                 }
4129         }
4130 }
4131
4132 /**
4133  * i40e_init_pf_dcb - Initialize DCB configuration
4134  * @pf: PF being configured
4135  *
4136  * Query the current DCB configuration and cache it
4137  * in the hardware structure
4138  **/
4139 static int i40e_init_pf_dcb(struct i40e_pf *pf)
4140 {
4141         struct i40e_hw *hw = &pf->hw;
4142         int err = 0;
4143
4144         if (pf->hw.func_caps.npar_enable)
4145                 goto out;
4146
4147         /* Get the initial DCB configuration */
4148         err = i40e_init_dcb(hw);
4149         if (!err) {
4150                 /* Device/Function is not DCBX capable */
4151                 if ((!hw->func_caps.dcb) ||
4152                     (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
4153                         dev_info(&pf->pdev->dev,
4154                                  "DCBX offload is not supported or is disabled for this PF.\n");
4155
4156                         if (pf->flags & I40E_FLAG_MFP_ENABLED)
4157                                 goto out;
4158
4159                 } else {
4160                         /* When status is not DISABLED then DCBX in FW */
4161                         pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
4162                                        DCB_CAP_DCBX_VER_IEEE;
4163
4164                         pf->flags |= I40E_FLAG_DCB_CAPABLE;
4165                         /* Enable DCB tagging only when more than one TC */
4166                         if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
4167                                 pf->flags |= I40E_FLAG_DCB_ENABLED;
4168                 }
4169         } else {
4170                 dev_info(&pf->pdev->dev, "AQ Querying DCB configuration failed: %d\n",
4171                          pf->hw.aq.asq_last_status);
4172         }
4173
4174 out:
4175         return err;
4176 }
4177 #endif /* CONFIG_I40E_DCB */
#define SPEED_SIZE 14
#define FC_SIZE 8
/**
 * i40e_print_link_message - print link up or down
 * @vsi: the VSI for which link needs a message
 * @isup: true if the link is up, false otherwise
 */
static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
{
	char speed[SPEED_SIZE] = "Unknown";
	char fc[FC_SIZE] = "RX/TX";

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	/* Map the negotiated link speed to a printable string; speeds not
	 * listed below stay "Unknown".
	 */
	switch (vsi->back->hw.phy.link_info.link_speed) {
	case I40E_LINK_SPEED_40GB:
		strncpy(speed, "40 Gbps", SPEED_SIZE);
		break;
	case I40E_LINK_SPEED_10GB:
		strncpy(speed, "10 Gbps", SPEED_SIZE);
		break;
	case I40E_LINK_SPEED_1GB:
		strncpy(speed, "1000 Mbps", SPEED_SIZE);
		break;
	default:
		break;
	}

	/* Describe the currently negotiated flow-control mode */
	switch (vsi->back->hw.fc.current_mode) {
	case I40E_FC_FULL:
		strncpy(fc, "RX/TX", FC_SIZE);
		break;
	case I40E_FC_TX_PAUSE:
		strncpy(fc, "TX", FC_SIZE);
		break;
	case I40E_FC_RX_PAUSE:
		strncpy(fc, "RX", FC_SIZE);
		break;
	default:
		strncpy(fc, "None", FC_SIZE);
		break;
	}

	netdev_info(vsi->netdev, "NIC Link is Up %s Full Duplex, Flow Control: %s\n",
		    speed, fc);
}
4226
/**
 * i40e_up_complete - Finish the last steps of bringing up a connection
 * @vsi: the VSI being configured
 *
 * Programs the interrupt scheme, starts the rings, enables NAPI and
 * interrupts, and reflects the current link state on the netdev.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_up_complete(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int err;

	/* Program whichever interrupt scheme the PF was set up with */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_vsi_configure_msix(vsi);
	else
		i40e_configure_msi_and_legacy(vsi);

	/* start rings */
	err = i40e_vsi_control_rings(vsi, true);
	if (err)
		return err;

	/* Clear DOWN before enabling NAPI so the poll loops may run */
	clear_bit(__I40E_DOWN, &vsi->state);
	i40e_napi_enable_all(vsi);
	i40e_vsi_enable_irq(vsi);

	/* Only start the Tx queues and carrier if link is already up;
	 * otherwise just report the link as down.
	 */
	if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
	    (vsi->netdev)) {
		i40e_print_link_message(vsi, true);
		netif_tx_start_all_queues(vsi->netdev);
		netif_carrier_on(vsi->netdev);
	} else if (vsi->netdev) {
		i40e_print_link_message(vsi, false);
	}

	/* replay FDIR SB filters */
	if (vsi->type == I40E_VSI_FDIR)
		i40e_fdir_filter_restore(vsi);
	i40e_service_event_schedule(pf);

	return 0;
}
4266
/**
 * i40e_vsi_reinit_locked - Reset the VSI
 * @vsi: the VSI being configured
 *
 * Rebuild the ring structs after some configuration
 * has changed, e.g. MTU size.  Takes the PF's CONFIG_BUSY bit for the
 * duration of the down/up cycle to serialize against other reconfigs.
 **/
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	WARN_ON(in_interrupt());
	/* Spin (sleeping) until we own the PF's config-busy bit */
	while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
		usleep_range(1000, 2000);
	i40e_down(vsi);

	/* Give a VF some time to respond to the reset.  The
	 * two second wait is based upon the watchdog cycle in
	 * the VF driver.
	 */
	if (vsi->type == I40E_VSI_SRIOV)
		msleep(2000);
	i40e_up(vsi);
	clear_bit(__I40E_CONFIG_BUSY, &pf->state);
}
4292
/**
 * i40e_up - Bring the connection back up after being down
 * @vsi: the VSI being configured
 *
 * Returns 0 on success, negative value on failure
 **/
int i40e_up(struct i40e_vsi *vsi)
{
	int err = i40e_vsi_configure(vsi);

	if (err)
		return err;

	return i40e_up_complete(vsi);
}
4307
4308 /**
4309  * i40e_down - Shutdown the connection processing
4310  * @vsi: the VSI being stopped
4311  **/
4312 void i40e_down(struct i40e_vsi *vsi)
4313 {
4314         int i;
4315
4316         /* It is assumed that the caller of this function
4317          * sets the vsi->state __I40E_DOWN bit.
4318          */
4319         if (vsi->netdev) {
4320                 netif_carrier_off(vsi->netdev);
4321                 netif_tx_disable(vsi->netdev);
4322         }
4323         i40e_vsi_disable_irq(vsi);
4324         i40e_vsi_control_rings(vsi, false);
4325         i40e_napi_disable_all(vsi);
4326
4327         for (i = 0; i < vsi->num_queue_pairs; i++) {
4328                 i40e_clean_tx_ring(vsi->tx_rings[i]);
4329                 i40e_clean_rx_ring(vsi->rx_rings[i]);
4330         }
4331 }
4332
/**
 * i40e_setup_tc - configure multiple traffic classes
 * @netdev: net device to configure
 * @tc: number of traffic classes to enable
 *
 * Returns 0 on success (including when the requested configuration is
 * already in place), negative value on failure
 **/
static int i40e_setup_tc(struct net_device *netdev, u8 tc)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u8 enabled_tc = 0;
	int ret = -EINVAL;
	int i;

	/* Check if DCB enabled to continue */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
		netdev_info(netdev, "DCB is not enabled for adapter\n");
		goto exit;
	}

	/* Check if MFP enabled */
	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
		netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
		goto exit;
	}

	/* Check whether tc count is within enabled limit */
	if (tc > i40e_pf_get_num_tc(pf)) {
		netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
		goto exit;
	}

	/* Generate TC map for number of tc requested,
	 * e.g. tc == 2 yields enabled_tc == 0x3 (TC0 and TC1)
	 */
	for (i = 0; i < tc; i++)
		enabled_tc |= (1 << i);

	/* Requesting same TC configuration as already enabled; note this
	 * returns 0 directly, not the -EINVAL that ret still holds
	 */
	if (enabled_tc == vsi->tc_config.enabled_tc)
		return 0;

	/* Quiesce VSI queues */
	i40e_quiesce_vsi(vsi);

	/* Configure VSI for enabled TCs */
	ret = i40e_vsi_config_tc(vsi, enabled_tc);
	if (ret) {
		netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
			    vsi->seid);
		goto exit;
	}

	/* Unquiesce VSI */
	i40e_unquiesce_vsi(vsi);

exit:
	return ret;
}
4390
/**
 * i40e_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog subtask is
 * enabled, and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_open(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int err;

	/* disallow open during test or if eeprom is broken */
	if (test_bit(__I40E_TESTING, &pf->state) ||
	    test_bit(__I40E_BAD_EEPROM, &pf->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	err = i40e_vsi_open(vsi);
	if (err)
		return err;

	/* configure global TSO hardware offload settings.  The TCP_FLAG_*
	 * constants are big-endian flag masks; byteswap and shift down to
	 * the 16-bit field the TSOMSK registers take.
	 */
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
						       TCP_FLAG_FIN) >> 16);
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
						       TCP_FLAG_FIN |
						       TCP_FLAG_CWR) >> 16);
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);

#ifdef CONFIG_I40E_VXLAN
	/* ask the stack to replay any VXLAN offload ports it knows about */
	vxlan_get_rx_port(netdev);
#endif

	return 0;
}
4435
4436 /**
4437  * i40e_vsi_open -
4438  * @vsi: the VSI to open
4439  *
4440  * Finish initialization of the VSI.
4441  *
4442  * Returns 0 on success, negative value on failure
4443  **/
4444 int i40e_vsi_open(struct i40e_vsi *vsi)
4445 {
4446         struct i40e_pf *pf = vsi->back;
4447         char int_name[IFNAMSIZ];
4448         int err;
4449
4450         /* allocate descriptors */
4451         err = i40e_vsi_setup_tx_resources(vsi);
4452         if (err)
4453                 goto err_setup_tx;
4454         err = i40e_vsi_setup_rx_resources(vsi);
4455         if (err)
4456                 goto err_setup_rx;
4457
4458         err = i40e_vsi_configure(vsi);
4459         if (err)
4460                 goto err_setup_rx;
4461
4462         if (vsi->netdev) {
4463                 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
4464                          dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
4465                 err = i40e_vsi_request_irq(vsi, int_name);
4466                 if (err)
4467                         goto err_setup_rx;
4468
4469                 /* Notify the stack of the actual queue counts. */
4470                 err = netif_set_real_num_tx_queues(vsi->netdev,
4471                                                    vsi->num_queue_pairs);
4472                 if (err)
4473                         goto err_set_queues;
4474
4475                 err = netif_set_real_num_rx_queues(vsi->netdev,
4476                                                    vsi->num_queue_pairs);
4477                 if (err)
4478                         goto err_set_queues;
4479
4480         } else if (vsi->type == I40E_VSI_FDIR) {
4481                 snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
4482                          dev_driver_string(&pf->pdev->dev));
4483                 err = i40e_vsi_request_irq(vsi, int_name);
4484         } else {
4485                 err = -EINVAL;
4486                 goto err_setup_rx;
4487         }
4488
4489         err = i40e_up_complete(vsi);
4490         if (err)
4491                 goto err_up_complete;
4492
4493         return 0;
4494
4495 err_up_complete:
4496         i40e_down(vsi);
4497 err_set_queues:
4498         i40e_vsi_free_irq(vsi);
4499 err_setup_rx:
4500         i40e_vsi_free_rx_resources(vsi);
4501 err_setup_tx:
4502         i40e_vsi_free_tx_resources(vsi);
4503         if (vsi == pf->vsi[pf->lan_vsi])
4504                 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
4505
4506         return err;
4507 }
4508
4509 /**
4510  * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
4511  * @pf: Pointer to pf
4512  *
4513  * This function destroys the hlist where all the Flow Director
4514  * filters were saved.
4515  **/
4516 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
4517 {
4518         struct i40e_fdir_filter *filter;
4519         struct hlist_node *node2;
4520
4521         hlist_for_each_entry_safe(filter, node2,
4522                                   &pf->fdir_filter_list, fdir_node) {
4523                 hlist_del(&filter->fdir_node);
4524                 kfree(filter);
4525         }
4526         pf->fdir_pf_active_filters = 0;
4527 }
4528
4529 /**
4530  * i40e_close - Disables a network interface
4531  * @netdev: network interface device structure
4532  *
4533  * The close entry point is called when an interface is de-activated
4534  * by the OS.  The hardware is still under the driver's control, but
4535  * this netdev interface is disabled.
4536  *
4537  * Returns 0, this is not allowed to fail
4538  **/
4539 static int i40e_close(struct net_device *netdev)
4540 {
4541         struct i40e_netdev_priv *np = netdev_priv(netdev);
4542         struct i40e_vsi *vsi = np->vsi;
4543
4544         i40e_vsi_close(vsi);
4545
4546         return 0;
4547 }
4548
/**
 * i40e_do_reset - Start a PF or Core Reset sequence
 * @pf: board private structure
 * @reset_flags: which reset is requested (bitmask of __I40E_*_REQUESTED)
 *
 * The essential difference in resets is that the PF Reset
 * doesn't clear the packet buffers, doesn't reset the PE
 * firmware, and doesn't bother the other PFs on the chip.
 *
 * Only the biggest reset indicated in @reset_flags is performed; an
 * unrecognized request is logged and ignored.
 **/
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
{
	u32 val;

	WARN_ON(in_interrupt());

	/* Warn the VFs before the hardware goes away, if the AQ is alive */
	if (i40e_check_asq_alive(&pf->hw))
		i40e_vc_notify_reset(pf);

	/* do the biggest reset indicated */
	if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {

		/* Request a Global Reset
		 *
		 * This will start the chip's countdown to the actual full
		 * chip reset event, and a warning interrupt to be sent
		 * to all PFs, including the requestor.  Our handler
		 * for the warning interrupt will deal with the shutdown
		 * and recovery of the switch setup.
		 */
		dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
		/* NOTE(review): unlike the CoreR and EMPR paths below, this
		 * branch does not call i40e_flush() after the trigger write —
		 * confirm that is intentional.
		 */
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);

	} else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) {

		/* Request a Core Reset
		 *
		 * Same as Global Reset, except does *not* include the MAC/PHY
		 */
		dev_dbg(&pf->pdev->dev, "CoreR requested\n");
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_CORER_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
		i40e_flush(&pf->hw);

	} else if (reset_flags & (1 << __I40E_EMP_RESET_REQUESTED)) {

		/* Request a Firmware Reset
		 *
		 * Same as Global reset, plus restarting the
		 * embedded firmware engine.
		 */
		/* enable EMP Reset */
		val = rd32(&pf->hw, I40E_GLGEN_RSTENA_EMP);
		val |= I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK;
		wr32(&pf->hw, I40E_GLGEN_RSTENA_EMP, val);

		/* force the reset */
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_EMPFWR_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
		i40e_flush(&pf->hw);

	} else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {

		/* Request a PF Reset
		 *
		 * Resets only the PF-specific registers
		 *
		 * This goes directly to the tear-down and rebuild of
		 * the switch, since we need to do all the recovery as
		 * for the Core Reset.
		 */
		dev_dbg(&pf->pdev->dev, "PFR requested\n");
		i40e_handle_reset_warning(pf);

	} else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
		int v;

		/* Find the VSI(s) that requested a re-init */
		dev_info(&pf->pdev->dev,
			 "VSI reinit requested\n");
		for (v = 0; v < pf->num_alloc_vsi; v++) {
			struct i40e_vsi *vsi = pf->vsi[v];
			if (vsi != NULL &&
			    test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
				i40e_vsi_reinit_locked(pf->vsi[v]);
				clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
			}
		}

		/* no further action needed, so return now */
		return;
	} else {
		dev_info(&pf->pdev->dev,
			 "bad reset request 0x%08x\n", reset_flags);
		return;
	}
}
4649
4650 #ifdef CONFIG_I40E_DCB
4651 /**
4652  * i40e_dcb_need_reconfig - Check if DCB needs reconfig
4653  * @pf: board private structure
4654  * @old_cfg: current DCB config
4655  * @new_cfg: new DCB config
4656  **/
4657 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
4658                             struct i40e_dcbx_config *old_cfg,
4659                             struct i40e_dcbx_config *new_cfg)
4660 {
4661         bool need_reconfig = false;
4662
4663         /* Check if ETS configuration has changed */
4664         if (memcmp(&new_cfg->etscfg,
4665                    &old_cfg->etscfg,
4666                    sizeof(new_cfg->etscfg))) {
4667                 /* If Priority Table has changed reconfig is needed */
4668                 if (memcmp(&new_cfg->etscfg.prioritytable,
4669                            &old_cfg->etscfg.prioritytable,
4670                            sizeof(new_cfg->etscfg.prioritytable))) {
4671                         need_reconfig = true;
4672                         dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
4673                 }
4674
4675                 if (memcmp(&new_cfg->etscfg.tcbwtable,
4676                            &old_cfg->etscfg.tcbwtable,
4677                            sizeof(new_cfg->etscfg.tcbwtable)))
4678                         dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
4679
4680                 if (memcmp(&new_cfg->etscfg.tsatable,
4681                            &old_cfg->etscfg.tsatable,
4682                            sizeof(new_cfg->etscfg.tsatable)))
4683                         dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
4684         }
4685
4686         /* Check if PFC configuration has changed */
4687         if (memcmp(&new_cfg->pfc,
4688                    &old_cfg->pfc,
4689                    sizeof(new_cfg->pfc))) {
4690                 need_reconfig = true;
4691                 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
4692         }
4693
4694         /* Check if APP Table has changed */
4695         if (memcmp(&new_cfg->app,
4696                    &old_cfg->app,
4697                    sizeof(new_cfg->app))) {
4698                 need_reconfig = true;
4699                 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
4700         }
4701
4702         return need_reconfig;
4703 }
4704
/**
 * i40e_handle_lldp_event - Handle LLDP Change MIB event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Returns 0 when the event required no action or was handled, otherwise
 * the status of the underlying AQ query or LLDPDU parse.
 **/
static int i40e_handle_lldp_event(struct i40e_pf *pf,
				  struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lldp_get_mib *mib =
		(struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
	struct i40e_dcbx_config tmp_dcbx_cfg;
	bool need_reconfig = false;
	int ret = 0;
	u8 type;

	/* Not DCB capable or capability disabled */
	if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
		return ret;

	/* Ignore if event is not for Nearest Bridge */
	type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
		& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
	if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
		return ret;

	/* Check MIB Type and return if event for Remote MIB update */
	type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
	if (type == I40E_AQ_LLDP_MIB_REMOTE) {
		/* Update the remote cached instance and return */
		ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
				I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
				&hw->remote_dcbx_config);
		goto exit;
	}

	/* Convert/store the DCBX data from LLDPDU temporarily */
	memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg));
	ret = i40e_lldp_to_dcb_config(e->msg_buf, &tmp_dcbx_cfg);
	if (ret) {
		/* Error in LLDPDU parsing return */
		dev_info(&pf->pdev->dev, "Failed parsing LLDPDU from event buffer\n");
		goto exit;
	}

	/* No change detected in DCBX configs */
	if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
		dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
		goto exit;
	}

	/* Decide whether a full reconfig is needed before overwriting */
	need_reconfig = i40e_dcb_need_reconfig(pf, dcbx_cfg, &tmp_dcbx_cfg);

	i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg);

	/* Overwrite the new configuration */
	*dcbx_cfg = tmp_dcbx_cfg;

	if (!need_reconfig)
		goto exit;

	/* Enable DCB tagging only when more than one TC */
	if (i40e_dcb_get_num_tc(dcbx_cfg) > 1)
		pf->flags |= I40E_FLAG_DCB_ENABLED;
	else
		pf->flags &= ~I40E_FLAG_DCB_ENABLED;

	/* Reconfiguration needed quiesce all VSIs */
	i40e_pf_quiesce_all_vsi(pf);

	/* Changes in configuration update VEB/VSI */
	i40e_dcb_reconfigure(pf);

	i40e_pf_unquiesce_all_vsi(pf);
exit:
	return ret;
}
4783 #endif /* CONFIG_I40E_DCB */
4784
/**
 * i40e_do_reset_safe - Protected reset path for userland calls.
 * @pf: board private structure
 * @reset_flags: which reset is requested
 *
 * Wraps i40e_do_reset() in the RTNL lock so reset requests originating
 * outside the rtnl-protected service task serialize correctly against
 * netdev state changes.
 **/
void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
{
	rtnl_lock();
	i40e_do_reset(pf, reset_flags);
	rtnl_unlock();
}
4797
4798 /**
4799  * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
4800  * @pf: board private structure
4801  * @e: event info posted on ARQ
4802  *
4803  * Handler for LAN Queue Overflow Event generated by the firmware for PF
4804  * and VF queues
4805  **/
4806 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
4807                                            struct i40e_arq_event_info *e)
4808 {
4809         struct i40e_aqc_lan_overflow *data =
4810                 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
4811         u32 queue = le32_to_cpu(data->prtdcb_rupto);
4812         u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
4813         struct i40e_hw *hw = &pf->hw;
4814         struct i40e_vf *vf;
4815         u16 vf_id;
4816
4817         dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
4818                 queue, qtx_ctl);
4819
4820         /* Queue belongs to VF, find the VF and issue VF reset */
4821         if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
4822             >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
4823                 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
4824                          >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
4825                 vf_id -= hw->func_caps.vf_base_id;
4826                 vf = &pf->vf[vf_id];
4827                 i40e_vc_notify_vf_reset(vf);
4828                 /* Allow VF to process pending reset notification */
4829                 msleep(20);
4830                 i40e_reset_vf(vf, false);
4831         }
4832 }
4833
/**
 * i40e_service_event_complete - Finish up the service event
 * @pf: board private structure
 *
 * Clears the SERVICE_SCHED bit so the service task can be scheduled
 * again.  The bit must be set on entry; hitting the BUG_ON means the
 * task ran without having been scheduled.
 **/
static void i40e_service_event_complete(struct i40e_pf *pf)
{
	BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
}
4846
4847 /**
4848  * i40e_get_current_fd_count - Get the count of FD filters programmed in the HW
4849  * @pf: board private structure
4850  **/
4851 int i40e_get_current_fd_count(struct i40e_pf *pf)
4852 {
4853         int val, fcnt_prog;
4854         val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
4855         fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
4856                     ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
4857                       I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
4858         return fcnt_prog;
4859 }
4860
4861 /**
4862  * i40e_fdir_check_and_reenable - Function to reenabe FD ATR or SB if disabled
4863  * @pf: board private structure
4864  **/
4865 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
4866 {
4867         u32 fcnt_prog, fcnt_avail;
4868
4869         /* Check if, FD SB or ATR was auto disabled and if there is enough room
4870          * to re-enable
4871          */
4872         if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
4873             (pf->flags & I40E_FLAG_FD_SB_ENABLED))
4874                 return;
4875         fcnt_prog = i40e_get_current_fd_count(pf);
4876         fcnt_avail = i40e_get_fd_cnt_all(pf);
4877         if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) {
4878                 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
4879                     (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
4880                         pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
4881                         dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
4882                 }
4883         }
4884         /* Wait for some more space to be available to turn on ATR */
4885         if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
4886                 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
4887                     (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
4888                         pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
4889                         dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
4890                 }
4891         }
4892 }
4893
4894 /**
4895  * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
4896  * @pf: board private structure
4897  **/
4898 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
4899 {
4900         if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT))
4901                 return;
4902
4903         /* if interface is down do nothing */
4904         if (test_bit(__I40E_DOWN, &pf->state))
4905                 return;
4906         i40e_fdir_check_and_reenable(pf);
4907
4908         if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
4909             (pf->flags & I40E_FLAG_FD_SB_ENABLED))
4910                 pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;
4911 }
4912
4913 /**
4914  * i40e_vsi_link_event - notify VSI of a link event
4915  * @vsi: vsi to be notified
4916  * @link_up: link up or down
4917  **/
4918 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
4919 {
4920         if (!vsi)
4921                 return;
4922
4923         switch (vsi->type) {
4924         case I40E_VSI_MAIN:
4925                 if (!vsi->netdev || !vsi->netdev_registered)
4926                         break;
4927
4928                 if (link_up) {
4929                         netif_carrier_on(vsi->netdev);
4930                         netif_tx_wake_all_queues(vsi->netdev);
4931                 } else {
4932                         netif_carrier_off(vsi->netdev);
4933                         netif_tx_stop_all_queues(vsi->netdev);
4934                 }
4935                 break;
4936
4937         case I40E_VSI_SRIOV:
4938                 break;
4939
4940         case I40E_VSI_VMDQ2:
4941         case I40E_VSI_CTRL:
4942         case I40E_VSI_MIRROR:
4943         default:
4944                 /* there is no notification for other VSIs */
4945                 break;
4946         }
4947 }
4948
4949 /**
4950  * i40e_veb_link_event - notify elements on the veb of a link event
4951  * @veb: veb to be notified
4952  * @link_up: link up or down
4953  **/
4954 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
4955 {
4956         struct i40e_pf *pf;
4957         int i;
4958
4959         if (!veb || !veb->pf)
4960                 return;
4961         pf = veb->pf;
4962
4963         /* depth first... */
4964         for (i = 0; i < I40E_MAX_VEB; i++)
4965                 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
4966                         i40e_veb_link_event(pf->veb[i], link_up);
4967
4968         /* ... now the local VSIs */
4969         for (i = 0; i < pf->num_alloc_vsi; i++)
4970                 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
4971                         i40e_vsi_link_event(pf->vsi[i], link_up);
4972 }
4973
/**
 * i40e_link_event - Update netif_carrier status
 * @pf: board private structure
 *
 * Compares the new link state against the cached previous one and, on a
 * change, prints the link message, walks the switch tree to update
 * carrier state, notifies VFs and re-tunes the PTP increment.
 **/
static void i40e_link_event(struct i40e_pf *pf)
{
	bool new_link, old_link;

	new_link = (pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP);
	old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);

	/* nothing to do when the up/down state did not change */
	if (new_link == old_link)
		return;
	if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
		i40e_print_link_message(pf->vsi[pf->lan_vsi], new_link);

	/* Notify the base of the switch tree connected to
	 * the link.  Floating VEBs are not notified.
	 */
	if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
		i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
	else
		i40e_vsi_link_event(pf->vsi[pf->lan_vsi], new_link);

	/* let any allocated VFs know about the new link state */
	if (pf->vf)
		i40e_vc_notify_link_state(pf);

	/* re-tune PTP timing for the new link state */
	if (pf->flags & I40E_FLAG_PTP)
		i40e_ptp_set_increment(pf);
}
5004
/**
 * i40e_check_hang_subtask - Check for hung queues and dropped interrupts
 * @pf: board private structure
 *
 * Set the per-queue flags to request a check for stuck queues in the irq
 * clean functions, then force interrupts to be sure the irq clean is called.
 **/
static void i40e_check_hang_subtask(struct i40e_pf *pf)
{
	int i, v;

	/* If we're down or resetting, just bail */
	if (test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	/* for each VSI/netdev
	 *     for each Tx queue
	 *         set the check flag
	 *     for each q_vector
	 *         force an interrupt
	 */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];
		int armed = 0;

		/* skip empty slots, downed VSIs and carrier-off netdevs */
		if (!pf->vsi[v] ||
		    test_bit(__I40E_DOWN, &vsi->state) ||
		    (vsi->netdev && !netif_carrier_ok(vsi->netdev)))
			continue;

		/* arm the hang check on every Tx ring of this VSI */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			set_check_for_tx_hang(vsi->tx_rings[i]);
			if (test_bit(__I40E_HANG_CHECK_ARMED,
				     &vsi->tx_rings[i]->state))
				armed++;
		}

		/* only kick interrupts when at least one ring is armed */
		if (armed) {
			if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
				/* legacy/MSI: single interrupt cause register */
				wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0,
				     (I40E_PFINT_DYN_CTL0_INTENA_MASK |
				      I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK));
			} else {
				/* MSI-X: trigger a SW interrupt per q_vector */
				u16 vec = vsi->base_vector - 1;
				u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
					   I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
				for (i = 0; i < vsi->num_q_vectors; i++, vec++)
					wr32(&vsi->back->hw,
					     I40E_PFINT_DYN_CTLN(vec), val);
			}
			i40e_flush(&vsi->back->hw);
		}
	}
}
5059
5060 /**
5061  * i40e_watchdog_subtask - Check and bring link up
5062  * @pf: board private structure
5063  **/
5064 static void i40e_watchdog_subtask(struct i40e_pf *pf)
5065 {
5066         int i;
5067
5068         /* if interface is down do nothing */
5069         if (test_bit(__I40E_DOWN, &pf->state) ||
5070             test_bit(__I40E_CONFIG_BUSY, &pf->state))
5071                 return;
5072
5073         /* Update the stats for active netdevs so the network stack
5074          * can look at updated numbers whenever it cares to
5075          */
5076         for (i = 0; i < pf->num_alloc_vsi; i++)
5077                 if (pf->vsi[i] && pf->vsi[i]->netdev)
5078                         i40e_update_stats(pf->vsi[i]);
5079
5080         /* Update the stats for the active switching components */
5081         for (i = 0; i < I40E_MAX_VEB; i++)
5082                 if (pf->veb[i])
5083                         i40e_update_veb_stats(pf->veb[i]);
5084
5085         i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
5086 }
5087
/**
 * i40e_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 *
 * Collects all pending reset requests from pf->state into a flag mask,
 * gives an already-received reset interrupt (recovery) precedence, and
 * otherwise kicks off the requested reset(s).  Everything runs under
 * the RTNL lock.
 **/
static void i40e_reset_subtask(struct i40e_pf *pf)
{
	u32 reset_flags = 0;

	rtnl_lock();
	/* translate each requested-reset state bit into a flag and clear it */
	if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
		reset_flags |= (1 << __I40E_REINIT_REQUESTED);
		clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
	}
	if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
		reset_flags |= (1 << __I40E_PF_RESET_REQUESTED);
		clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
	}
	if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
		reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED);
		clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
	}
	if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
		reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED);
		clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
	}

	/* If there's a recovery already waiting, it takes
	 * precedence before starting a new reset sequence.
	 */
	if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
		i40e_handle_reset_warning(pf);
		goto unlock;
	}

	/* start the reset only when one was requested and we are neither
	 * down nor in the middle of another (re)configuration
	 */
	if (reset_flags &&
	    !test_bit(__I40E_DOWN, &pf->state) &&
	    !test_bit(__I40E_CONFIG_BUSY, &pf->state))
		i40e_do_reset(pf, reset_flags);

unlock:
	rtnl_unlock();
}
5131
/**
 * i40e_handle_link_event - Handle link event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Caches the previous link status, copies the new status out of the AQ
 * descriptor into the hw struct and runs the link-change logic.  A
 * follow-up get_link_info request then re-arms LSE reporting and
 * catches any change that raced with processing the initial event.
 **/
static void i40e_handle_link_event(struct i40e_pf *pf,
				   struct i40e_arq_event_info *e)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_aqc_get_link_status *status =
		(struct i40e_aqc_get_link_status *)&e->desc.params.raw;
	struct i40e_link_status *hw_link_info = &hw->phy.link_info;

	/* save off old link status information */
	memcpy(&pf->hw.phy.link_info_old, hw_link_info,
	       sizeof(pf->hw.phy.link_info_old));

	/* update link status */
	hw_link_info->phy_type = (enum i40e_aq_phy_type)status->phy_type;
	hw_link_info->link_speed = (enum i40e_aq_link_speed)status->link_speed;
	hw_link_info->link_info = status->link_info;
	hw_link_info->an_info = status->an_info;
	hw_link_info->ext_info = status->ext_info;
	hw_link_info->lse_enable =
		le16_to_cpu(status->command_flags) &
			    I40E_AQ_LSE_ENABLE;

	/* process the event */
	i40e_link_event(pf);

	/* Do a new status request to re-enable LSE reporting
	 * and load new status information into the hw struct,
	 * then see if the status changed while processing the
	 * initial event.
	 */
	i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
	i40e_link_event(pf);
}
5170
/**
 * i40e_clean_adminq_subtask - Clean the AdminQ rings
 * @pf: board private structure
 *
 * Drains the admin receive queue: first logs and clears any error bits
 * latched in the ARQ/ASQ length registers, then pops events off the ARQ
 * (bounded by pf->adminq_work_limit) dispatching each to its handler,
 * and finally re-enables the admin queue interrupt cause.
 **/
static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
{
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &pf->hw;
	u16 pending, i = 0;
	i40e_status ret;
	u16 opcode;
	u32 oldval;
	u32 val;

	if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state))
		return;

	/* check for error indications */
	val = rd32(&pf->hw, pf->hw.aq.arq.len);
	oldval = val;
	if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
		dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
	}
	if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
		dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
	}
	if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
		dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
	}
	/* write back only when an error bit was cleared */
	if (oldval != val)
		wr32(&pf->hw, pf->hw.aq.arq.len, val);

	val = rd32(&pf->hw, pf->hw.aq.asq.len);
	oldval = val;
	if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
		dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
		dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
		dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
	}
	if (oldval != val)
		wr32(&pf->hw, pf->hw.aq.asq.len, val);

	event.msg_size = I40E_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
	if (!event.msg_buf)
		return;

	do {
		event.msg_size = I40E_MAX_AQ_BUF_SIZE; /* reinit each time */
		ret = i40e_clean_arq_element(hw, &event, &pending);
		if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
			/* queue is empty, we're done */
			dev_info(&pf->pdev->dev, "No ARQ event found\n");
			break;
		} else if (ret) {
			dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);
		switch (opcode) {

		case i40e_aqc_opc_get_link_status:
			i40e_handle_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
			ret = i40e_vc_process_vf_msg(pf,
					le16_to_cpu(event.desc.retval),
					le32_to_cpu(event.desc.cookie_high),
					le32_to_cpu(event.desc.cookie_low),
					event.msg_buf,
					event.msg_size);
			break;
		case i40e_aqc_opc_lldp_update_mib:
			dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
#ifdef CONFIG_I40E_DCB
			/* i40e_handle_lldp_event may touch netdev state */
			rtnl_lock();
			ret = i40e_handle_lldp_event(pf, &event);
			rtnl_unlock();
#endif /* CONFIG_I40E_DCB */
			break;
		case i40e_aqc_opc_event_lan_overflow:
			dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
			i40e_handle_lan_overflow_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_peer:
			dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
			break;
		default:
			dev_info(&pf->pdev->dev,
				 "ARQ Error: Unknown event 0x%04x received\n",
				 opcode);
			break;
		}
	} while (pending && (i++ < pf->adminq_work_limit));

	clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
	/* re-enable Admin queue interrupt cause */
	val = rd32(hw, I40E_PFINT_ICR0_ENA);
	val |=  I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, val);
	i40e_flush(hw);

	kfree(event.msg_buf);
}
5285
5286 /**
5287  * i40e_verify_eeprom - make sure eeprom is good to use
5288  * @pf: board private structure
5289  **/
5290 static void i40e_verify_eeprom(struct i40e_pf *pf)
5291 {
5292         int err;
5293
5294         err = i40e_diag_eeprom_test(&pf->hw);
5295         if (err) {
5296                 /* retry in case of garbage read */
5297                 err = i40e_diag_eeprom_test(&pf->hw);
5298                 if (err) {
5299                         dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
5300                                  err);
5301                         set_bit(__I40E_BAD_EEPROM, &pf->state);
5302                 }
5303         }
5304
5305         if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
5306                 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
5307                 clear_bit(__I40E_BAD_EEPROM, &pf->state);
5308         }
5309 }
5310
/**
 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
 * @veb: pointer to the VEB instance
 *
 * This is a recursive function that first builds the attached VSIs then
 * recurses in to build the next layer of VEB.  We track the connections
 * through our own index numbers because the seid's from the HW could
 * change across the reset.
 *
 * Returns 0 on success, -ENOENT when the owner VSI is missing, or the
 * error from rebuilding a VSI/VEB.
 **/
static int i40e_reconstitute_veb(struct i40e_veb *veb)
{
	struct i40e_vsi *ctl_vsi = NULL;
	struct i40e_pf *pf = veb->pf;
	int v, veb_idx;
	int ret;

	/* build VSI that owns this VEB, temporarily attached to base VEB */
	for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
		if (pf->vsi[v] &&
		    pf->vsi[v]->veb_idx == veb->idx &&
		    pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
			ctl_vsi = pf->vsi[v];
			break;
		}
	}
	if (!ctl_vsi) {
		dev_info(&pf->pdev->dev,
			 "missing owner VSI for veb_idx %d\n", veb->idx);
		ret = -ENOENT;
		goto end_reconstitute;
	}
	/* a non-LAN owner hangs off the LAN VSI's uplink until the VEB
	 * exists and the VSI can be moved onto it (i40e_add_veb below)
	 */
	if (ctl_vsi != pf->vsi[pf->lan_vsi])
		ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
	ret = i40e_add_vsi(ctl_vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "rebuild of owner VSI failed: %d\n", ret);
		goto end_reconstitute;
	}
	i40e_vsi_reset_stats(ctl_vsi);

	/* create the VEB in the switch and move the VSI onto the VEB */
	ret = i40e_add_veb(veb, ctl_vsi);
	if (ret)
		goto end_reconstitute;

	/* create the remaining VSIs attached to this VEB */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
			continue;

		if (pf->vsi[v]->veb_idx == veb->idx) {
			struct i40e_vsi *vsi = pf->vsi[v];
			vsi->uplink_seid = veb->seid;
			ret = i40e_add_vsi(vsi);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "rebuild of vsi_idx %d failed: %d\n",
					 v, ret);
				goto end_reconstitute;
			}
			i40e_vsi_reset_stats(vsi);
		}
	}

	/* create any VEBs attached to this VEB - RECURSION */
	for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
		if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
			pf->veb[veb_idx]->uplink_seid = veb->seid;
			ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
			if (ret)
				break;
		}
	}

end_reconstitute:
	return ret;
}
5389
/**
 * i40e_get_capabilities - get info about the HW
 * @pf: the PF struct
 *
 * Queries function capabilities via the admin queue, growing the buffer
 * and retrying when firmware reports ENOMEM, then applies an MSI-X
 * count adjustment for old firmware and clamps num_vsis on rev-0 parts.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -ENODEV when
 * the AQ command fails.
 **/
static int i40e_get_capabilities(struct i40e_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *cap_buf;
	u16 data_size;
	int buf_len;
	int err;

	buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
	do {
		cap_buf = kzalloc(buf_len, GFP_KERNEL);
		if (!cap_buf)
			return -ENOMEM;

		/* this loads the data into the hw struct for us */
		err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
					    &data_size,
					    i40e_aqc_opc_list_func_capabilities,
					    NULL);
		/* data loaded, buffer no longer needed */
		kfree(cap_buf);

		if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
			/* retry with a larger buffer */
			buf_len = data_size;
		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
			dev_info(&pf->pdev->dev,
				 "capability discovery failed: aq=%d\n",
				 pf->hw.aq.asq_last_status);
			return -ENODEV;
		}
	} while (err);

	/* old-firmware quirk (< FW 2.22): bump the reported MSI-X vector
	 * counts by one — presumably under-reported; TODO confirm against
	 * firmware release notes
	 */
	if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) ||
	    (pf->hw.aq.fw_maj_ver < 2)) {
		pf->hw.func_caps.num_msix_vectors++;
		pf->hw.func_caps.num_msix_vectors_vf++;
	}

	if (pf->hw.debug_mask & I40E_DEBUG_USER)
		dev_info(&pf->pdev->dev,
			 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
			 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
			 pf->hw.func_caps.num_msix_vectors,
			 pf->hw.func_caps.num_msix_vectors_vf,
			 pf->hw.func_caps.fd_filters_guaranteed,
			 pf->hw.func_caps.fd_filters_best_effort,
			 pf->hw.func_caps.num_tx_qp,
			 pf->hw.func_caps.num_vsis);

/* minimum VSIs needed: one base, one per VF, plus one for FCoE if present */
#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
		       + pf->hw.func_caps.num_vfs)
	if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
		dev_info(&pf->pdev->dev,
			 "got num_vsis %d, setting num_vsis to %d\n",
			 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
		pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
	}

	return 0;
}
5454
5455 static int i40e_vsi_clear(struct i40e_vsi *vsi);
5456
/**
 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
 * @pf: board private structure
 *
 * Seeds the Flow Director hash key if the NVM left it uninitialized,
 * then finds (or creates) the FDIR VSI and hooks up its clean-ring irq
 * handler.  SB support is turned off if the VSI cannot be created.
 **/
static void i40e_fdir_sb_setup(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi;
	int i;

	/* quick workaround for an NVM issue that leaves a critical register
	 * uninitialized
	 */
	if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
		static const u32 hkey[] = {
			0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
			0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
			0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
			0x95b3a76d};

		for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
			wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
	}

	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
		return;

	/* find existing VSI and see if it needs configuring */
	vsi = NULL;
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
			vsi = pf->vsi[i];
			break;
		}
	}

	/* create a new VSI if none exists */
	if (!vsi) {
		vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
				     pf->vsi[pf->lan_vsi]->seid, 0);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
			return;
		}
	}

	i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
}
5505
5506 /**
5507  * i40e_fdir_teardown - release the Flow Director resources
5508  * @pf: board private structure
5509  **/
5510 static void i40e_fdir_teardown(struct i40e_pf *pf)
5511 {
5512         int i;
5513
5514         i40e_fdir_filter_exit(pf);
5515         for (i = 0; i < pf->num_alloc_vsi; i++) {
5516                 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
5517                         i40e_vsi_release(pf->vsi[i]);
5518                         break;
5519                 }
5520         }
5521 }
5522
/**
 * i40e_prep_for_reset - prep for the core to reset
 * @pf: board private structure
 *
 * Close up the VFs and other things in prep for pf Reset.
 *
 * Returns 0 on success (including when a recovery is already pending),
 * or the error code from shutting down the LAN HMC.
 **/
static int i40e_prep_for_reset(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret = 0;
	u32 v;

	clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
	/* only one reset recovery may be in flight at a time */
	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
		return 0;

	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");

	/* quiesce the VSIs and their queues that are not already DOWN */
	i40e_pf_quiesce_all_vsi(pf);

	/* zero the cached switch element ids; NOTE(review): presumably so
	 * the post-reset rebuild assigns fresh seids rather than reusing
	 * stale ones — confirm against i40e_reset_and_rebuild()
	 */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			pf->vsi[v]->seid = 0;
	}

	i40e_shutdown_adminq(&pf->hw);

	/* call shutdown HMC */
	if (hw->hmc.hmc_obj) {
		ret = i40e_shutdown_lan_hmc(hw);
		if (ret) {
			dev_warn(&pf->pdev->dev,
				 "shutdown_lan_hmc failed: %d\n", ret);
			/* abandon this recovery so another may be attempted */
			clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
		}
	}
	return ret;
}
5562
5563 /**
5564  * i40e_send_version - update firmware with driver version
5565  * @pf: PF struct
5566  */
5567 static void i40e_send_version(struct i40e_pf *pf)
5568 {
5569         struct i40e_driver_version dv;
5570
5571         dv.major_version = DRV_VERSION_MAJOR;
5572         dv.minor_version = DRV_VERSION_MINOR;
5573         dv.build_version = DRV_VERSION_BUILD;
5574         dv.subbuild_version = 0;
5575         strncpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
5576         i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
5577 }
5578
/**
 * i40e_reset_and_rebuild - reset and rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to re-initialized.
 *
 * Issues a PF reset, then rebuilds the AdminQ, HMC, DCB config, switch
 * topology, VSIs/VEBs, and misc interrupt, finishing by restarting the
 * quiesced VSIs and resetting all VFs.  On any failure the recovery-pending
 * flag is cleared and the function returns, leaving the device partly built.
 **/
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u32 v;

	/* Now we wait for GRST to settle out.
	 * We don't have to delete the VEBs or VSIs from the hw switch
	 * because the reset will make them disappear.
	 */
	ret = i40e_pf_reset(hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
		goto end_core_reset;
	}
	pf->pfr_count++;

	/* if the driver is going down anyway, skip the rebuild */
	if (test_bit(__I40E_DOWN, &pf->state))
		goto end_core_reset;
	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");

	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
	ret = i40e_init_adminq(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
		goto end_core_reset;
	}

	/* re-verify the eeprom if we just had an EMP reset */
	if (test_bit(__I40E_EMP_RESET_REQUESTED, &pf->state)) {
		clear_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
		i40e_verify_eeprom(pf);
	}

	i40e_clear_pxe_mode(hw);
	ret = i40e_get_capabilities(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
			 ret);
		goto end_core_reset;
	}

	/* re-create the HMC context sized for current queue counts */
	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp,
				pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
	if (ret) {
		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}
	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (ret) {
		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}

#ifdef CONFIG_I40E_DCB
	ret = i40e_init_pf_dcb(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "init_pf_dcb failed: %d\n", ret);
		goto end_core_reset;
	}
#endif /* CONFIG_I40E_DCB */

	/* do basic switch setup */
	ret = i40e_setup_pf_switch(pf, reinit);
	if (ret)
		goto end_core_reset;

	/* Rebuild the VSIs and VEBs that existed before reset.
	 * They are still in our local switch element arrays, so only
	 * need to rebuild the switch model in the HW.
	 *
	 * If there were VEBs but the reconstitution failed, we'll try
	 * to recover minimal use by getting the basic PF VSI working.
	 */
	if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
		/* find the one VEB connected to the MAC, and find orphans */
		for (v = 0; v < I40E_MAX_VEB; v++) {
			if (!pf->veb[v])
				continue;

			/* uplink_seid == 0 marks an orphan VEB */
			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
			    pf->veb[v]->uplink_seid == 0) {
				ret = i40e_reconstitute_veb(pf->veb[v]);

				if (!ret)
					continue;

				/* If Main VEB failed, we're in deep doodoo,
				 * so give up rebuilding the switch and set up
				 * for minimal rebuild of PF VSI.
				 * If orphan failed, we'll report the error
				 * but try to keep going.
				 */
				if (pf->veb[v]->uplink_seid == pf->mac_seid) {
					dev_info(&pf->pdev->dev,
						 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
						 ret);
					pf->vsi[pf->lan_vsi]->uplink_seid
								= pf->mac_seid;
					break;
				} else if (pf->veb[v]->uplink_seid == 0) {
					dev_info(&pf->pdev->dev,
						 "rebuild of orphan VEB failed: %d\n",
						 ret);
				}
			}
		}
	}

	if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
		dev_info(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
		/* no VEB, so rebuild only the Main VSI */
		ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "rebuild of Main VSI failed: %d\n", ret);
			goto end_core_reset;
		}
	}

	/* reinit the misc interrupt */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		ret = i40e_setup_misc_vector(pf);

	/* restart the VSIs that were rebuilt and running before the reset */
	i40e_pf_unquiesce_all_vsi(pf);

	/* VFs were torn down in prep; kick them through their own reset */
	if (pf->num_alloc_vfs) {
		for (v = 0; v < pf->num_alloc_vfs; v++)
			i40e_reset_vf(&pf->vf[v], true);
	}

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

end_core_reset:
	clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
}
5724
5725 /**
5726  * i40e_handle_reset_warning - prep for the pf to reset, reset and rebuild
5727  * @pf: board private structure
5728  *
5729  * Close up the VFs and other things in prep for a Core Reset,
5730  * then get ready to rebuild the world.
5731  **/
5732 static void i40e_handle_reset_warning(struct i40e_pf *pf)
5733 {
5734         i40e_status ret;
5735
5736         ret = i40e_prep_for_reset(pf);
5737         if (!ret)
5738                 i40e_reset_and_rebuild(pf, false);
5739 }
5740
5741 /**
5742  * i40e_handle_mdd_event
5743  * @pf: pointer to the pf structure
5744  *
5745  * Called from the MDD irq handler to identify possibly malicious vfs
5746  **/
5747 static void i40e_handle_mdd_event(struct i40e_pf *pf)
5748 {
5749         struct i40e_hw *hw = &pf->hw;
5750         bool mdd_detected = false;
5751         struct i40e_vf *vf;
5752         u32 reg;
5753         int i;
5754
5755         if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
5756                 return;
5757
5758         /* find what triggered the MDD event */
5759         reg = rd32(hw, I40E_GL_MDET_TX);
5760         if (reg & I40E_GL_MDET_TX_VALID_MASK) {
5761                 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
5762                                 I40E_GL_MDET_TX_PF_NUM_SHIFT;
5763                 u8 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
5764                                 I40E_GL_MDET_TX_VF_NUM_SHIFT;
5765                 u8 event = (reg & I40E_GL_MDET_TX_EVENT_SHIFT) >>
5766                                 I40E_GL_MDET_TX_EVENT_SHIFT;
5767                 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
5768                                 I40E_GL_MDET_TX_QUEUE_SHIFT;
5769                 dev_info(&pf->pdev->dev,
5770                          "Malicious Driver Detection event 0x%02x on TX queue %d pf number 0x%02x vf number 0x%02x\n",
5771                          event, queue, pf_num, vf_num);
5772                 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
5773                 mdd_detected = true;
5774         }
5775         reg = rd32(hw, I40E_GL_MDET_RX);
5776         if (reg & I40E_GL_MDET_RX_VALID_MASK) {
5777                 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
5778                                 I40E_GL_MDET_RX_FUNCTION_SHIFT;
5779                 u8 event = (reg & I40E_GL_MDET_RX_EVENT_SHIFT) >>
5780                                 I40E_GL_MDET_RX_EVENT_SHIFT;
5781                 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
5782                                 I40E_GL_MDET_RX_QUEUE_SHIFT;
5783                 dev_info(&pf->pdev->dev,
5784                          "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
5785                          event, queue, func);
5786                 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
5787                 mdd_detected = true;
5788         }
5789
5790         /* see if one of the VFs needs its hand slapped */
5791         for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
5792                 vf = &(pf->vf[i]);
5793                 reg = rd32(hw, I40E_VP_MDET_TX(i));
5794                 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
5795                         wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
5796                         vf->num_mdd_events++;
5797                         dev_info(&pf->pdev->dev, "MDD TX event on VF %d\n", i);
5798                 }
5799
5800                 reg = rd32(hw, I40E_VP_MDET_RX(i));
5801                 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
5802                         wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
5803                         vf->num_mdd_events++;
5804                         dev_info(&pf->pdev->dev, "MDD RX event on VF %d\n", i);
5805                 }
5806
5807                 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
5808                         dev_info(&pf->pdev->dev,
5809                                  "Too many MDD events on VF %d, disabled\n", i);
5810                         dev_info(&pf->pdev->dev,
5811                                  "Use PF Control I/F to re-enable the VF\n");
5812                         set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
5813                 }
5814         }
5815
5816         /* re-enable mdd interrupt cause */
5817         clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
5818         reg = rd32(hw, I40E_PFINT_ICR0_ENA);
5819         reg |=  I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
5820         wr32(hw, I40E_PFINT_ICR0_ENA, reg);
5821         i40e_flush(hw);
5822 }
5823
5824 #ifdef CONFIG_I40E_VXLAN
5825 /**
5826  * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW
5827  * @pf: board private structure
5828  **/
5829 static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
5830 {
5831         struct i40e_hw *hw = &pf->hw;
5832         i40e_status ret;
5833         u8 filter_index;
5834         __be16 port;
5835         int i;
5836
5837         if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC))
5838                 return;
5839
5840         pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
5841
5842         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
5843                 if (pf->pending_vxlan_bitmap & (1 << i)) {
5844                         pf->pending_vxlan_bitmap &= ~(1 << i);
5845                         port = pf->vxlan_ports[i];
5846                         ret = port ?
5847                               i40e_aq_add_udp_tunnel(hw, ntohs(port),
5848                                                      I40E_AQC_TUNNEL_TYPE_VXLAN,
5849                                                      &filter_index, NULL)
5850                               : i40e_aq_del_udp_tunnel(hw, i, NULL);
5851
5852                         if (ret) {
5853                                 dev_info(&pf->pdev->dev, "Failed to execute AQ command for %s port %d with index %d\n",
5854                                          port ? "adding" : "deleting",
5855                                          ntohs(port), port ? i : i);
5856
5857                                 pf->vxlan_ports[i] = 0;
5858                         } else {
5859                                 dev_info(&pf->pdev->dev, "%s port %d with AQ command with index %d\n",
5860                                          port ? "Added" : "Deleted",
5861                                          ntohs(port), port ? i : filter_index);
5862                         }
5863                 }
5864         }
5865 }
5866
5867 #endif
5868 /**
5869  * i40e_service_task - Run the driver's async subtasks
5870  * @work: pointer to work_struct containing our data
5871  **/
5872 static void i40e_service_task(struct work_struct *work)
5873 {
5874         struct i40e_pf *pf = container_of(work,
5875                                           struct i40e_pf,
5876                                           service_task);
5877         unsigned long start_time = jiffies;
5878
5879         i40e_reset_subtask(pf);
5880         i40e_handle_mdd_event(pf);
5881         i40e_vc_process_vflr_event(pf);
5882         i40e_watchdog_subtask(pf);
5883         i40e_fdir_reinit_subtask(pf);
5884         i40e_check_hang_subtask(pf);
5885         i40e_sync_filters_subtask(pf);
5886 #ifdef CONFIG_I40E_VXLAN
5887         i40e_sync_vxlan_filters_subtask(pf);
5888 #endif
5889         i40e_clean_adminq_subtask(pf);
5890
5891         i40e_service_event_complete(pf);
5892
5893         /* If the tasks have taken longer than one timer cycle or there
5894          * is more work to be done, reschedule the service task now
5895          * rather than wait for the timer to tick again.
5896          */
5897         if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
5898             test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state)            ||
5899             test_bit(__I40E_MDD_EVENT_PENDING, &pf->state)               ||
5900             test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
5901                 i40e_service_event_schedule(pf);
5902 }
5903
5904 /**
5905  * i40e_service_timer - timer callback
5906  * @data: pointer to PF struct
5907  **/
5908 static void i40e_service_timer(unsigned long data)
5909 {
5910         struct i40e_pf *pf = (struct i40e_pf *)data;
5911
5912         mod_timer(&pf->service_timer,
5913                   round_jiffies(jiffies + pf->service_timer_period));
5914         i40e_service_event_schedule(pf);
5915 }
5916
5917 /**
5918  * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
5919  * @vsi: the VSI being configured
5920  **/
5921 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
5922 {
5923         struct i40e_pf *pf = vsi->back;
5924
5925         switch (vsi->type) {
5926         case I40E_VSI_MAIN:
5927                 vsi->alloc_queue_pairs = pf->num_lan_qps;
5928                 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
5929                                       I40E_REQ_DESCRIPTOR_MULTIPLE);
5930                 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
5931                         vsi->num_q_vectors = pf->num_lan_msix;
5932                 else
5933                         vsi->num_q_vectors = 1;
5934
5935                 break;
5936
5937         case I40E_VSI_FDIR:
5938                 vsi->alloc_queue_pairs = 1;
5939                 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
5940                                       I40E_REQ_DESCRIPTOR_MULTIPLE);
5941                 vsi->num_q_vectors = 1;
5942                 break;
5943
5944         case I40E_VSI_VMDQ2:
5945                 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
5946                 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
5947                                       I40E_REQ_DESCRIPTOR_MULTIPLE);
5948                 vsi->num_q_vectors = pf->num_vmdq_msix;
5949                 break;
5950
5951         case I40E_VSI_SRIOV:
5952                 vsi->alloc_queue_pairs = pf->num_vf_qps;
5953                 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
5954                                       I40E_REQ_DESCRIPTOR_MULTIPLE);
5955                 break;
5956
5957         default:
5958                 WARN_ON(1);
5959                 return -ENODATA;
5960         }
5961
5962         return 0;
5963 }
5964
5965 /**
5966  * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
5967  * @type: VSI pointer
5968  * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
5969  *
5970  * On error: returns error code (negative)
5971  * On success: returns 0
5972  **/
5973 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
5974 {
5975         int size;
5976         int ret = 0;
5977
5978         /* allocate memory for both Tx and Rx ring pointers */
5979         size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
5980         vsi->tx_rings = kzalloc(size, GFP_KERNEL);
5981         if (!vsi->tx_rings)
5982                 return -ENOMEM;
5983         vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
5984
5985         if (alloc_qvectors) {
5986                 /* allocate memory for q_vector pointers */
5987                 size = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors;
5988                 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
5989                 if (!vsi->q_vectors) {
5990                         ret = -ENOMEM;
5991                         goto err_vectors;
5992                 }
5993         }
5994         return ret;
5995
5996 err_vectors:
5997         kfree(vsi->tx_rings);
5998         return ret;
5999 }
6000
/**
 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
 * @pf: board private structure
 * @type: type of VSI
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/
static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{
	int ret = -ENODEV;
	struct i40e_vsi *vsi;
	int vsi_idx;
	int i;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VSI list may be fragmented if VSI creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free VSIs in the list.
	 *
	 * find next empty vsi slot, looping back around if necessary
	 */
	i = pf->next_vsi;
	while (i < pf->num_alloc_vsi && pf->vsi[i])
		i++;
	if (i >= pf->num_alloc_vsi) {
		/* wrapped: rescan the slots before next_vsi */
		i = 0;
		while (i < pf->next_vsi && pf->vsi[i])
			i++;
	}

	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
		vsi_idx = i;             /* Found one! */
	} else {
		ret = -ENODEV;
		goto unlock_pf;  /* out of VSI slots! */
	}
	/* remember the slot after the one we just claimed */
	pf->next_vsi = ++i;

	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
	if (!vsi) {
		ret = -ENOMEM;
		goto unlock_pf;
	}
	/* baseline VSI state: DOWN until explicitly brought up */
	vsi->type = type;
	vsi->back = pf;
	set_bit(__I40E_DOWN, &vsi->state);
	vsi->flags = 0;
	vsi->idx = vsi_idx;
	vsi->rx_itr_setting = pf->rx_itr_default;
	vsi->tx_itr_setting = pf->tx_itr_default;
	vsi->netdev_registered = false;
	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
	INIT_LIST_HEAD(&vsi->mac_filter_list);
	vsi->irqs_ready = false;

	ret = i40e_set_num_rings_in_vsi(vsi);
	if (ret)
		goto err_rings;

	ret = i40e_vsi_alloc_arrays(vsi, true);
	if (ret)
		goto err_rings;

	/* Setup default MSIX irq handler for VSI */
	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);

	pf->vsi[vsi_idx] = vsi;
	ret = vsi_idx;
	goto unlock_pf;

err_rings:
	/* i was incremented past the claimed slot above, so i - 1 puts
	 * next_vsi back on the slot we are abandoning
	 */
	pf->next_vsi = i - 1;
	kfree(vsi);
unlock_pf:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}
6081
6082 /**
6083  * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
6084  * @type: VSI pointer
6085  * @free_qvectors: a bool to specify if q_vectors need to be freed.
6086  *
6087  * On error: returns error code (negative)
6088  * On success: returns 0
6089  **/
6090 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
6091 {
6092         /* free the ring and vector containers */
6093         if (free_qvectors) {
6094                 kfree(vsi->q_vectors);
6095                 vsi->q_vectors = NULL;
6096         }
6097         kfree(vsi->tx_rings);
6098         vsi->tx_rings = NULL;
6099         vsi->rx_rings = NULL;
6100 }
6101
/**
 * i40e_vsi_clear - Deallocate the VSI provided
 * @vsi: the VSI being un-configured
 *
 * Returns queue and interrupt resources to the PF piles, detaches the VSI
 * from pf->vsi[] under the switch mutex, and frees the structure.  Always
 * returns 0; a NULL @vsi is a no-op.
 **/
static int i40e_vsi_clear(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf;

	if (!vsi)
		return 0;

	/* no backref means it was never attached to a PF; just free it */
	if (!vsi->back)
		goto free_vsi;
	pf = vsi->back;

	mutex_lock(&pf->switch_mutex);
	if (!pf->vsi[vsi->idx]) {
		dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
			vsi->idx, vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	/* sanity check: the slot must point back at this very VSI */
	if (pf->vsi[vsi->idx] != vsi) {
		dev_err(&pf->pdev->dev,
			"pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
			pf->vsi[vsi->idx]->idx,
			pf->vsi[vsi->idx],
			pf->vsi[vsi->idx]->type,
			vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	/* updates the pf for this cleared vsi */
	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);

	i40e_vsi_free_arrays(vsi, true);

	pf->vsi[vsi->idx] = NULL;
	/* let the allocator reuse this slot first if it is the lowest free */
	if (vsi->idx < pf->next_vsi)
		pf->next_vsi = vsi->idx;

unlock_vsi:
	mutex_unlock(&pf->switch_mutex);
free_vsi:
	kfree(vsi);

	return 0;
}
6151
6152 /**
6153  * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
6154  * @vsi: the VSI being cleaned
6155  **/
6156 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
6157 {
6158         int i;
6159
6160         if (vsi->tx_rings && vsi->tx_rings[0]) {
6161                 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
6162                         kfree_rcu(vsi->tx_rings[i], rcu);
6163                         vsi->tx_rings[i] = NULL;
6164                         vsi->rx_rings[i] = NULL;
6165                 }
6166         }
6167 }
6168
6169 /**
6170  * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
6171  * @vsi: the VSI being configured
6172  **/
6173 static int i40e_alloc_rings(struct i40e_vsi *vsi)
6174 {
6175         struct i40e_ring *tx_ring, *rx_ring;
6176         struct i40e_pf *pf = vsi->back;
6177         int i;
6178
6179         /* Set basic values in the rings to be used later during open() */
6180         for (i = 0; i < vsi->alloc_queue_pairs; i++) {
6181                 /* allocate space for both Tx and Rx in one shot */
6182                 tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
6183                 if (!tx_ring)
6184                         goto err_out;
6185
6186                 tx_ring->queue_index = i;
6187                 tx_ring->reg_idx = vsi->base_queue + i;
6188                 tx_ring->ring_active = false;
6189                 tx_ring->vsi = vsi;
6190                 tx_ring->netdev = vsi->netdev;
6191                 tx_ring->dev = &pf->pdev->dev;
6192                 tx_ring->count = vsi->num_desc;
6193                 tx_ring->size = 0;
6194                 tx_ring->dcb_tc = 0;
6195                 vsi->tx_rings[i] = tx_ring;
6196
6197                 rx_ring = &tx_ring[1];
6198                 rx_ring->queue_index = i;
6199                 rx_ring->reg_idx = vsi->base_queue + i;
6200                 rx_ring->ring_active = false;
6201                 rx_ring->vsi = vsi;
6202                 rx_ring->netdev = vsi->netdev;
6203                 rx_ring->dev = &pf->pdev->dev;
6204                 rx_ring->count = vsi->num_desc;
6205                 rx_ring->size = 0;
6206                 rx_ring->dcb_tc = 0;
6207                 if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
6208                         set_ring_16byte_desc_enabled(rx_ring);
6209                 else
6210                         clear_ring_16byte_desc_enabled(rx_ring);
6211                 vsi->rx_rings[i] = rx_ring;
6212         }
6213
6214         return 0;
6215
6216 err_out:
6217         i40e_vsi_clear_rings(vsi);
6218         return -ENOMEM;
6219 }
6220
6221 /**
6222  * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
6223  * @pf: board private structure
6224  * @vectors: the number of MSI-X vectors to request
6225  *
6226  * Returns the number of vectors reserved, or error
6227  **/
6228 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
6229 {
6230         vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
6231                                         I40E_MIN_MSIX, vectors);
6232         if (vectors < 0) {
6233                 dev_info(&pf->pdev->dev,
6234                          "MSI-X vector reservation failed: %d\n", vectors);
6235                 vectors = 0;
6236         }
6237
6238         return vectors;
6239 }
6240
/**
 * i40e_init_msix - Setup the MSIX capability
 * @pf: board private structure
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_init_msix(struct i40e_pf *pf)
{
	i40e_status err = 0;
	struct i40e_hw *hw = &pf->hw;
	int v_budget, i;
	int vec;

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
		return -ENODEV;

	/* The number of vectors we'll request will be comprised of:
	 *   - Add 1 for "other" cause for Admin Queue events, etc.
	 *   - The number of LAN queue pairs
	 *      - Queues being used for RSS.
	 *              We don't need as many as max_rss_size vectors.
	 *              use rss_size instead in the calculation since that
	 *              is governed by number of cpus in the system.
	 *      - assumes symmetric Tx/Rx pairing
	 *   - The number of VMDq pairs
	 * Once we count this up, try the request.
	 *
	 * If we can't get what we want, we'll simplify to nearly nothing
	 * and try again.  If that still fails, we punt.
	 */
	pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size);
	pf->num_vmdq_msix = pf->num_vmdq_qps;
	v_budget = 1 + pf->num_lan_msix;
	v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
	/* one extra vector for the sideband Flow Director ring */
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
		v_budget++;

	/* Scale down if necessary, and the rings will share vectors */
	v_budget = min_t(int, v_budget, hw->func_caps.num_msix_vectors);

	pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
				   GFP_KERNEL);
	if (!pf->msix_entries)
		return -ENOMEM;

	for (i = 0; i < v_budget; i++)
		pf->msix_entries[i].entry = i;
	vec = i40e_reserve_msix_vectors(pf, v_budget);

	if (vec != v_budget) {
		/* If we have limited resources, we will start with no vectors
		 * for the special features and then allocate vectors to some
		 * of these features based on the policy and at the end disable
		 * the features that did not get any vectors.
		 */
		pf->num_vmdq_msix = 0;
	}

	if (vec < I40E_MIN_MSIX) {
		/* couldn't even get the minimum; fall back to non-MSIX */
		pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
		kfree(pf->msix_entries);
		pf->msix_entries = NULL;
		return -ENODEV;

	} else if (vec == I40E_MIN_MSIX) {
		/* Adjust for minimal MSIX use */
		pf->num_vmdq_vsis = 0;
		pf->num_vmdq_qps = 0;
		pf->num_lan_qps = 1;
		pf->num_lan_msix = 1;

	} else if (vec != v_budget) {
		/* reserve the misc vector */
		vec--;

		/* Scale vector usage down */
		pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
		pf->num_vmdq_vsis = 1;

		/* partition out the remaining vectors */
		switch (vec) {
		case 2:
			pf->num_lan_msix = 1;
			break;
		case 3:
			pf->num_lan_msix = 2;
			break;
		default:
			/* roughly half to LAN, the rest to VMDq VSIs */
			pf->num_lan_msix = min_t(int, (vec / 2),
						 pf->num_lan_qps);
			pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix),
						  I40E_DEFAULT_NUM_VMDQ_VSI);
			break;
		}
	}

	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    (pf->num_vmdq_msix == 0)) {
		dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
	}
	return err;
}
6346
6347 /**
6348  * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
6349  * @vsi: the VSI being configured
6350  * @v_idx: index of the vector in the vsi struct
6351  *
6352  * We allocate one q_vector.  If allocation fails we return -ENOMEM.
6353  **/
6354 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
6355 {
6356         struct i40e_q_vector *q_vector;
6357
6358         /* allocate q_vector */
6359         q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
6360         if (!q_vector)
6361                 return -ENOMEM;
6362
6363         q_vector->vsi = vsi;
6364         q_vector->v_idx = v_idx;
6365         cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
6366         if (vsi->netdev)
6367                 netif_napi_add(vsi->netdev, &q_vector->napi,
6368                                i40e_napi_poll, NAPI_POLL_WEIGHT);
6369
6370         q_vector->rx.latency_range = I40E_LOW_LATENCY;
6371         q_vector->tx.latency_range = I40E_LOW_LATENCY;
6372
6373         /* tie q_vector and vsi together */
6374         vsi->q_vectors[v_idx] = q_vector;
6375
6376         return 0;
6377 }
6378
6379 /**
6380  * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
6381  * @vsi: the VSI being configured
6382  *
6383  * We allocate one q_vector per queue interrupt.  If allocation fails we
6384  * return -ENOMEM.
6385  **/
6386 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
6387 {
6388         struct i40e_pf *pf = vsi->back;
6389         int v_idx, num_q_vectors;
6390         int err;
6391
6392         /* if not MSIX, give the one vector only to the LAN VSI */
6393         if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6394                 num_q_vectors = vsi->num_q_vectors;
6395         else if (vsi == pf->vsi[pf->lan_vsi])
6396                 num_q_vectors = 1;
6397         else
6398                 return -EINVAL;
6399
6400         for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
6401                 err = i40e_vsi_alloc_q_vector(vsi, v_idx);
6402                 if (err)
6403                         goto err_out;
6404         }
6405
6406         return 0;
6407
6408 err_out:
6409         while (v_idx--)
6410                 i40e_free_q_vector(vsi, v_idx);
6411
6412         return err;
6413 }
6414
6415 /**
6416  * i40e_init_interrupt_scheme - Determine proper interrupt scheme
6417  * @pf: board private structure to initialize
6418  **/
static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
{
	int err = 0;

	/* Preferred mode: MSI-X.  If vector allocation fails, strip every
	 * capability flag that depends on multiple vectors and recompute
	 * the queue layout for single-vector operation.
	 */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		err = i40e_init_msix(pf);
		if (err) {
			pf->flags &= ~(I40E_FLAG_MSIX_ENABLED	|
				       I40E_FLAG_RSS_ENABLED	|
				       I40E_FLAG_DCB_CAPABLE	|
				       I40E_FLAG_SRIOV_ENABLED	|
				       I40E_FLAG_FD_SB_ENABLED	|
				       I40E_FLAG_FD_ATR_ENABLED	|
				       I40E_FLAG_VMDQ_ENABLED);

			/* rework the queue expectations without MSIX */
			i40e_determine_queue_usage(pf);
		}
	}

	/* Second choice: single-vector MSI */
	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSI_ENABLED)) {
		dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
		err = pci_enable_msi(pf->pdev);
		if (err) {
			dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
			pf->flags &= ~I40E_FLAG_MSI_ENABLED;
		}
	}

	/* Last resort: legacy INTx */
	if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
		dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");

	/* track first vector for misc interrupts */
	/* NOTE(review): the return value of i40e_get_lump() is discarded;
	 * this function returns void, so a failure here is not reported
	 * to the caller -- confirm that is intended.
	 */
	err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
}
6455
6456 /**
6457  * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
6458  * @pf: board private structure
6459  *
6460  * This sets up the handler for MSIX 0, which is used to manage the
6461  * non-queue interrupts, e.g. AdminQ and errors.  This is not used
6462  * when in MSI or Legacy interrupt mode.
6463  **/
static int i40e_setup_misc_vector(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Only request the irq if this is the first time through, and
	 * not when we're rebuilding after a Reset
	 */
	if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
		err = request_irq(pf->msix_entries[0].vector,
				  i40e_intr, 0, pf->misc_int_name, pf);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "request_irq for %s failed: %d\n",
				 pf->misc_int_name, err);
			/* NOTE(review): the request_irq error code is folded
			 * into -EFAULT here rather than propagated */
			return -EFAULT;
		}
	}

	/* enable the admin-queue/error interrupt causes for vector 0 */
	i40e_enable_misc_int_causes(hw);

	/* associate no queues to the misc vector */
	wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
	/* program ITR0's RX slot with the 8K interval default */
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);

	/* flush posted writes before enabling the interrupt */
	i40e_flush(hw);

	i40e_irq_dynamic_enable_icr0(pf);

	return err;
}
6495
6496 /**
6497  * i40e_config_rss - Prepare for RSS if used
6498  * @pf: board private structure
6499  **/
static int i40e_config_rss(struct i40e_pf *pf)
{
	/* Set of random keys generated using kernel random number generator */
	static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
				0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
				0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
				0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};
	struct i40e_hw *hw = &pf->hw;
	u32 lut = 0;
	int i, j;
	u64 hena;

	/* Fill out hash function seed */
	for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
		wr32(hw, I40E_PFQF_HKEY(i), seed[i]);

	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes.
	 * HENA spans two 32-bit registers; read both halves, OR in the
	 * defaults (preserving anything already enabled), and write back.
	 */
	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
		((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= I40E_DEFAULT_RSS_HENA;
	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));

	/* Populate the LUT with max no. of queues in round robin fashion */
	for (i = 0, j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {

		/* The assumption is that lan qp count will be the highest
		 * qp count for any PF VSI that needs RSS.
		 * If multiple VSIs need RSS support, all the qp counts
		 * for those VSIs should be a power of 2 for RSS to work.
		 * If LAN VSI is the only consumer for RSS then this requirement
		 * is not necessary.
		 */
		if (j == pf->rss_size)
			j = 0;	/* wrap queue index round-robin */
		/* lut = 4-byte sliding window of 4 lut entries */
		lut = (lut << 8) | (j &
			 ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
		/* On i = 3, we have 4 entries in lut; write to the register */
		if ((i & 3) == 3)
			wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
	}
	i40e_flush(hw);

	return 0;
}
6546
6547 /**
6548  * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
6549  * @pf: board private structure
6550  * @queue_count: the requested queue count for rss.
6551  *
6552  * returns 0 if rss is not enabled, if enabled returns the final rss queue
6553  * count which may be different from the requested queue count.
6554  **/
6555 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
6556 {
6557         if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
6558                 return 0;
6559
6560         queue_count = min_t(int, queue_count, pf->rss_size_max);
6561
6562         if (queue_count != pf->rss_size) {
6563                 i40e_prep_for_reset(pf);
6564
6565                 pf->rss_size = queue_count;
6566
6567                 i40e_reset_and_rebuild(pf, true);
6568                 i40e_config_rss(pf);
6569         }
6570         dev_info(&pf->pdev->dev, "RSS count:  %d\n", pf->rss_size);
6571         return pf->rss_size;
6572 }
6573
6574 /**
6575  * i40e_sw_init - Initialize general software structures (struct i40e_pf)
6576  * @pf: board private structure to initialize
6577  *
6578  * i40e_sw_init initializes the Adapter private data structure.
6579  * Fields are initialized based on PCI device information and
6580  * OS network device settings (MTU size).
6581  **/
static int i40e_sw_init(struct i40e_pf *pf)
{
	int err = 0;
	int size;

	/* derive message level from the module's "debug" parameter; the
	 * I40E_DEBUG_USER bit routes the raw value to the HW debug mask
	 */
	pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
				(NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
	pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
	if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
		if (I40E_DEBUG_USER & debug)
			pf->hw.debug_mask = debug;
		pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
						I40E_DEFAULT_MSG_ENABLE);
	}

	/* Set default capability flags */
	pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
		    I40E_FLAG_MSI_ENABLED     |
		    I40E_FLAG_MSIX_ENABLED    |
		    I40E_FLAG_RX_1BUF_ENABLED;

	/* Set default ITR */
	pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
	pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;

	/* Depending on PF configurations, it is possible that the RSS
	 * maximum might end up larger than the available queues
	 */
	pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
	pf->rss_size_max = min_t(int, pf->rss_size_max,
				 pf->hw.func_caps.num_tx_qp);
	if (pf->hw.func_caps.rss) {
		pf->flags |= I40E_FLAG_RSS_ENABLED;
		/* cap the active RSS queue count at the online CPU count */
		pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
	} else {
		pf->rss_size = 1;
	}

	/* MFP mode enabled */
	if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
		pf->flags |= I40E_FLAG_MFP_ENABLED;
		dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
	}

	/* FW/NVM is not yet fixed in this regard */
	if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
	    (pf->hw.func_caps.fd_filters_best_effort > 0)) {
		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
		pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
		/* Setup a counter for fd_atr per pf */
		pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id);
		/* sideband Flow Director is not available in MFP mode */
		if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
			/* Setup a counter for fd_sb per pf */
			pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
		} else {
			dev_info(&pf->pdev->dev,
				 "Flow Director Sideband mode Disabled in MFP mode\n");
		}
		pf->fdir_pf_filter_count =
				 pf->hw.func_caps.fd_filters_guaranteed;
		pf->hw.fdir_shared_filter_count =
				 pf->hw.func_caps.fd_filters_best_effort;
	}

	if (pf->hw.func_caps.vmdq) {
		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
		pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
		pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
	}

#ifdef CONFIG_PCI_IOV
	if (pf->hw.func_caps.num_vfs) {
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
		pf->flags |= I40E_FLAG_SRIOV_ENABLED;
		pf->num_req_vfs = min_t(int,
					pf->hw.func_caps.num_vfs,
					I40E_MAX_VF_COUNT);
	}
#endif /* CONFIG_PCI_IOV */
	/* sentinel values until the real ones are discovered later */
	pf->eeprom_version = 0xDEAD;
	pf->lan_veb = I40E_NO_VEB;
	pf->lan_vsi = I40E_NO_VSI;

	/* set up queue assignment tracking */
	size = sizeof(struct i40e_lump_tracking)
		+ (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
	pf->qp_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->qp_pile) {
		err = -ENOMEM;
		goto sw_init_done;
	}
	pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
	pf->qp_pile->search_hint = 0;

	/* set up vector assignment tracking */
	size = sizeof(struct i40e_lump_tracking)
		+ (sizeof(u16) * pf->hw.func_caps.num_msix_vectors);
	pf->irq_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->irq_pile) {
		/* NOTE(review): qp_pile is freed but not NULLed here;
		 * callers must not touch it after a failed i40e_sw_init */
		kfree(pf->qp_pile);
		err = -ENOMEM;
		goto sw_init_done;
	}
	pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors;
	pf->irq_pile->search_hint = 0;

	mutex_init(&pf->switch_mutex);

sw_init_done:
	return err;
}
6694
6695 /**
6696  * i40e_set_ntuple - set the ntuple feature flag and take action
6697  * @pf: board private structure to initialize
6698  * @features: the feature set that the stack is suggesting
6699  *
6700  * returns a bool to indicate if reset needs to happen
6701  **/
6702 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
6703 {
6704         bool need_reset = false;
6705
6706         /* Check if Flow Director n-tuple support was enabled or disabled.  If
6707          * the state changed, we need to reset.
6708          */
6709         if (features & NETIF_F_NTUPLE) {
6710                 /* Enable filters and mark for reset */
6711                 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
6712                         need_reset = true;
6713                 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
6714         } else {
6715                 /* turn off filters, mark for reset and clear SW filter list */
6716                 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
6717                         need_reset = true;
6718                         i40e_fdir_filter_exit(pf);
6719                 }
6720                 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
6721                 /* if ATR was disabled it can be re-enabled. */
6722                 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
6723                         pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
6724         }
6725         return need_reset;
6726 }
6727
6728 /**
6729  * i40e_set_features - set the netdev feature flags
6730  * @netdev: ptr to the netdev being adjusted
6731  * @features: the feature set that the stack is suggesting
6732  **/
6733 static int i40e_set_features(struct net_device *netdev,
6734                              netdev_features_t features)
6735 {
6736         struct i40e_netdev_priv *np = netdev_priv(netdev);
6737         struct i40e_vsi *vsi = np->vsi;
6738         struct i40e_pf *pf = vsi->back;
6739         bool need_reset;
6740
6741         if (features & NETIF_F_HW_VLAN_CTAG_RX)
6742                 i40e_vlan_stripping_enable(vsi);
6743         else
6744                 i40e_vlan_stripping_disable(vsi);
6745
6746         need_reset = i40e_set_ntuple(pf, features);
6747
6748         if (need_reset)
6749                 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
6750
6751         return 0;
6752 }
6753
6754 #ifdef CONFIG_I40E_VXLAN
6755 /**
6756  * i40e_get_vxlan_port_idx - Lookup a possibly offloaded for Rx UDP port
6757  * @pf: board private structure
6758  * @port: The UDP port to look up
6759  *
6760  * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
6761  **/
6762 static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port)
6763 {
6764         u8 i;
6765
6766         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
6767                 if (pf->vxlan_ports[i] == port)
6768                         return i;
6769         }
6770
6771         return i;
6772 }
6773
6774 /**
6775  * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
6776  * @netdev: This physical port's netdev
6777  * @sa_family: Socket Family that VXLAN is notifying us about
6778  * @port: New UDP port number that VXLAN started listening to
6779  **/
6780 static void i40e_add_vxlan_port(struct net_device *netdev,
6781                                 sa_family_t sa_family, __be16 port)
6782 {
6783         struct i40e_netdev_priv *np = netdev_priv(netdev);
6784         struct i40e_vsi *vsi = np->vsi;
6785         struct i40e_pf *pf = vsi->back;
6786         u8 next_idx;
6787         u8 idx;
6788
6789         if (sa_family == AF_INET6)
6790                 return;
6791
6792         idx = i40e_get_vxlan_port_idx(pf, port);
6793
6794         /* Check if port already exists */
6795         if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
6796                 netdev_info(netdev, "Port %d already offloaded\n", ntohs(port));
6797                 return;
6798         }
6799
6800         /* Now check if there is space to add the new port */
6801         next_idx = i40e_get_vxlan_port_idx(pf, 0);
6802
6803         if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
6804                 netdev_info(netdev, "Maximum number of UDP ports reached, not adding port %d\n",
6805                             ntohs(port));
6806                 return;
6807         }
6808
6809         /* New port: add it and mark its index in the bitmap */
6810         pf->vxlan_ports[next_idx] = port;
6811         pf->pending_vxlan_bitmap |= (1 << next_idx);
6812
6813         pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
6814 }
6815
6816 /**
6817  * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away
6818  * @netdev: This physical port's netdev
6819  * @sa_family: Socket Family that VXLAN is notifying us about
6820  * @port: UDP port number that VXLAN stopped listening to
6821  **/
6822 static void i40e_del_vxlan_port(struct net_device *netdev,
6823                                 sa_family_t sa_family, __be16 port)
6824 {
6825         struct i40e_netdev_priv *np = netdev_priv(netdev);
6826         struct i40e_vsi *vsi = np->vsi;
6827         struct i40e_pf *pf = vsi->back;
6828         u8 idx;
6829
6830         if (sa_family == AF_INET6)
6831                 return;
6832
6833         idx = i40e_get_vxlan_port_idx(pf, port);
6834
6835         /* Check if port already exists */
6836         if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
6837                 /* if port exists, set it to 0 (mark for deletion)
6838                  * and make it pending
6839                  */
6840                 pf->vxlan_ports[idx] = 0;
6841
6842                 pf->pending_vxlan_bitmap |= (1 << idx);
6843
6844                 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
6845         } else {
6846                 netdev_warn(netdev, "Port %d was not found, not deleting\n",
6847                             ntohs(port));
6848         }
6849 }
6850
6851 #endif
6852 #ifdef HAVE_FDB_OPS
#ifdef USE_CONST_DEV_UC_CHAR
static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr,
			    u16 flags)
#else
static int i40e_ndo_fdb_add(struct ndmsg *ndm,
			    struct net_device *dev,
			    unsigned char *addr,
			    u16 flags)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_pf *pf = np->vsi->back;
	int err = 0;

	/* FDB entries are only honoured when SR-IOV is in use */
	if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
		return -EOPNOTSUPP;

	/* Hardware does not support aging addresses so if a
	 * ndm_state is given only allow permanent addresses
	 */
	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
		netdev_info(dev, "FDB only supports static addresses\n");
		return -EINVAL;
	}

	/* add to the appropriate (unicast vs multicast) address list */
	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		err = dev_uc_add_excl(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_add_excl(dev, addr);
	else
		err = -EINVAL;

	/* Only return duplicate errors if NLM_F_EXCL is set */
	if (err == -EEXIST && !(flags & NLM_F_EXCL))
		err = 0;

	return err;
}
6893
6894 #ifndef USE_DEFAULT_FDB_DEL_DUMP
#ifdef USE_CONST_DEV_UC_CHAR
static int i40e_ndo_fdb_del(struct ndmsg *ndm,
			    struct net_device *dev,
			    const unsigned char *addr)
#else
static int i40e_ndo_fdb_del(struct ndmsg *ndm,
			    struct net_device *dev,
			    unsigned char *addr)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_pf *pf = np->vsi->back;
	int err = -EOPNOTSUPP;

	/* NOTE(review): delete rejects NUD_PERMANENT while add requires
	 * it; the message text mirrors the add path -- confirm intended
	 */
	if (ndm->ndm_state & NUD_PERMANENT) {
		netdev_info(dev, "FDB only supports static addresses\n");
		return -EINVAL;
	}

	/* deletion only makes sense while SR-IOV is active; otherwise
	 * the default -EOPNOTSUPP is returned
	 */
	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
		if (is_unicast_ether_addr(addr))
			err = dev_uc_del(dev, addr);
		else if (is_multicast_ether_addr(addr))
			err = dev_mc_del(dev, addr);
		else
			err = -EINVAL;
	}

	return err;
}
6925
6926 static int i40e_ndo_fdb_dump(struct sk_buff *skb,
6927                              struct netlink_callback *cb,
6928                              struct net_device *dev,
6929                              int idx)
6930 {
6931         struct i40e_netdev_priv *np = netdev_priv(dev);
6932         struct i40e_pf *pf = np->vsi->back;
6933
6934         if (pf->flags & I40E_FLAG_SRIOV_ENABLED)
6935                 idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
6936
6937         return idx;
6938 }
6939
6940 #endif /* USE_DEFAULT_FDB_DEL_DUMP */
6941 #endif /* HAVE_FDB_OPS */
/* netdev callback table shared by the PF's LAN and VMDq netdevs */
static const struct net_device_ops i40e_netdev_ops = {
	.ndo_open		= i40e_open,
	.ndo_stop		= i40e_close,
	.ndo_start_xmit		= i40e_lan_xmit_frame,
	.ndo_get_stats64	= i40e_get_netdev_stats_struct,
	.ndo_set_rx_mode	= i40e_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= i40e_set_mac,
	.ndo_change_mtu		= i40e_change_mtu,
	.ndo_do_ioctl		= i40e_ioctl,
	.ndo_tx_timeout		= i40e_tx_timeout,
	.ndo_vlan_rx_add_vid	= i40e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= i40e_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= i40e_netpoll,
#endif
	.ndo_setup_tc		= i40e_setup_tc,
	.ndo_set_features	= i40e_set_features,
	/* SR-IOV VF management hooks */
	.ndo_set_vf_mac		= i40e_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= i40e_ndo_set_vf_port_vlan,
	.ndo_set_vf_rate	= i40e_ndo_set_vf_bw,
	.ndo_get_vf_config	= i40e_ndo_get_vf_config,
	.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,
	.ndo_set_vf_spoofchk	= i40e_ndo_set_vf_spoofck,
#ifdef CONFIG_I40E_VXLAN
	.ndo_add_vxlan_port	= i40e_add_vxlan_port,
	.ndo_del_vxlan_port	= i40e_del_vxlan_port,
#endif
#ifdef HAVE_FDB_OPS
	.ndo_fdb_add		= i40e_ndo_fdb_add,
#ifndef USE_DEFAULT_FDB_DEL_DUMP
	.ndo_fdb_del		= i40e_ndo_fdb_del,
	.ndo_fdb_dump		= i40e_ndo_fdb_dump,
#endif
#endif
};
6978
6979 /**
6980  * i40e_config_netdev - Setup the netdev flags
6981  * @vsi: the VSI being configured
6982  *
6983  * Returns 0 on success, negative value on failure
6984  **/
static int i40e_config_netdev(struct i40e_vsi *vsi)
{
	u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_netdev_priv *np;
	struct net_device *netdev;
	u8 mac_addr[ETH_ALEN];
	int etherdev_size;

	/* allocate a multiqueue netdev sized for this VSI's queue pairs */
	etherdev_size = sizeof(struct i40e_netdev_priv);
	netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
	if (!netdev)
		return -ENOMEM;

	/* link netdev and VSI to each other */
	vsi->netdev = netdev;
	np = netdev_priv(netdev);
	np->vsi = vsi;

	/* offloads usable on encapsulated (tunnelled) packets */
	netdev->hw_enc_features |= NETIF_F_IP_CSUM	 |
				  NETIF_F_GSO_UDP_TUNNEL |
				  NETIF_F_TSO;

	netdev->features = NETIF_F_SG		       |
			   NETIF_F_IP_CSUM	       |
			   NETIF_F_SCTP_CSUM	       |
			   NETIF_F_HIGHDMA	       |
			   NETIF_F_GSO_UDP_TUNNEL      |
			   NETIF_F_HW_VLAN_CTAG_TX     |
			   NETIF_F_HW_VLAN_CTAG_RX     |
			   NETIF_F_HW_VLAN_CTAG_FILTER |
			   NETIF_F_IPV6_CSUM	       |
			   NETIF_F_TSO		       |
			   NETIF_F_TSO_ECN	       |
			   NETIF_F_TSO6		       |
			   NETIF_F_RXCSUM	       |
			   NETIF_F_RXHASH	       |
			   0;

	/* ntuple (Flow Director sideband) is not offered in MFP mode */
	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		netdev->features |= NETIF_F_NTUPLE;

	/* copy netdev features into list of user selectable features */
	netdev->hw_features |= netdev->features;

	if (vsi->type == I40E_VSI_MAIN) {
		SET_NETDEV_DEV(netdev, &pf->pdev->dev);
		ether_addr_copy(mac_addr, hw->mac.perm_addr);
		/* The following two steps are necessary to prevent reception
		 * of tagged packets - by default the NVM loads a MAC-VLAN
		 * filter that will accept any tagged packet.  This is to
		 * prevent that during normal operations until a specific
		 * VLAN tag filter has been set.
		 */
		i40e_rm_default_mac_filter(vsi, mac_addr);
		i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true);
	} else {
		/* relate the VSI_VMDQ name to the VSI_MAIN name */
		snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
			 pf->vsi[pf->lan_vsi]->netdev->name);
		random_ether_addr(mac_addr);
		i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
	}
	/* always accept broadcast */
	i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);

	ether_addr_copy(netdev->dev_addr, mac_addr);
	ether_addr_copy(netdev->perm_addr, mac_addr);
	/* vlan gets same features (except vlan offload)
	 * after any tweaks for specific VSI types
	 */
	netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
						     NETIF_F_HW_VLAN_CTAG_RX |
						   NETIF_F_HW_VLAN_CTAG_FILTER);
	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	/* Setup netdev TC information */
	i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);

	netdev->netdev_ops = &i40e_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;
	i40e_set_ethtool_ops(netdev);

	return 0;
}
7069
/**
 * i40e_vsi_delete - Delete a VSI from the switch
 * @vsi: the VSI being removed
 *
 * Sends the delete-element AQ command for @vsi's switch element.
 * The PF's default LAN VSI is never deleted; the request is silently
 * ignored for it.  No return value.
 **/
static void i40e_vsi_delete(struct i40e_vsi *vsi)
{
	/* remove default VSI is not allowed */
	if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
		return;

	i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
}
7084
/**
 * i40e_add_vsi - Add a VSI to the switch
 * @vsi: the VSI being configured
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command.  The main (PF) VSI already
 * exists in firmware, so for it the context is only retrieved and its
 * TC/queue map updated rather than added.
 *
 * Returns 0 on success, negative value on failure.
 **/
static int i40e_add_vsi(struct i40e_vsi *vsi)
{
	int ret = -ENODEV;
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi_context ctxt;
	u8 enabled_tc = 0x1; /* TC0 enabled */
	int f_count = 0;

	memset(&ctxt, 0, sizeof(ctxt));
	switch (vsi->type) {
	case I40E_VSI_MAIN:
		/* The PF's main VSI is already setup as part of the
		 * device initialization, so we'll not bother with
		 * the add_vsi call, but we will retrieve the current
		 * VSI context.
		 */
		ctxt.seid = pf->main_vsi_seid;
		ctxt.pf_num = pf->hw.pf_id;
		ctxt.vf_num = 0;
		ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "couldn't get pf vsi config, err %d, aq_err %d\n",
				 ret, pf->hw.aq.asq_last_status);
			return -ENOENT;
		}
		memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
		vsi->info.valid_sections = 0;

		vsi->seid = ctxt.seid;
		vsi->id = ctxt.vsi_number;

		enabled_tc = i40e_pf_get_tc_map(pf);

		/* MFP mode setup queue map and update VSI */
		if (pf->flags & I40E_FLAG_MFP_ENABLED) {
			memset(&ctxt, 0, sizeof(ctxt));
			ctxt.seid = pf->main_vsi_seid;
			ctxt.pf_num = pf->hw.pf_id;
			ctxt.vf_num = 0;
			i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "update vsi failed, aq_err=%d\n",
					 pf->hw.aq.asq_last_status);
				ret = -ENOENT;
				goto err;
			}
			/* update the local VSI info queue map */
			i40e_vsi_update_queue_map(vsi, &ctxt);
			vsi->info.valid_sections = 0;
		} else {
			/* Default/Main VSI is only enabled for TC0
			 * reconfigure it to enable all TCs that are
			 * available on the port in SFP mode.
			 */
			ret = i40e_vsi_config_tc(vsi, enabled_tc);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n",
					 enabled_tc, ret,
					 pf->hw.aq.asq_last_status);
				ret = -ENOENT;
			}
		}
		break;

	case I40E_VSI_FDIR:
		/* Flow Director VSI is a plain PF-type data port */
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = 0;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = 0x1;     /* regular data port */
		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_VMDQ2:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = 0;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = 0x1;     /* regular data port */
		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;

		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);

		/* This VSI is connected to VEB so the switch_id
		 * should be set to zero by default.
		 */
		ctxt.info.switch_id = 0;
		ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
		ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);

		/* Setup the VSI tx/rx queue map for TC0 only for now */
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_SRIOV:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = 0x1;     /* regular data port */
		ctxt.flags = I40E_AQ_VSI_TYPE_VF;

		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);

		/* This VSI is connected to VEB so the switch_id
		 * should be set to zero by default.
		 */
		ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);

		/* mark the VLAN section valid and set the port VLAN mode */
		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
		if (pf->vf[vsi->vf_id].spoofchk) {
			/* enable MAC and VLAN anti-spoof checks for this VF */
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
			ctxt.info.sec_flags |=
				(I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
				 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
		}
		/* Setup the VSI tx/rx queue map for TC0 only for now */
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	default:
		return -ENODEV;
	}

	/* everything except the pre-existing main VSI needs an explicit add */
	if (vsi->type != I40E_VSI_MAIN) {
		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add vsi failed, aq_err=%d\n",
				 vsi->back->hw.aq.asq_last_status);
			ret = -ENOENT;
			goto err;
		}
		memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
		vsi->info.valid_sections = 0;
		vsi->seid = ctxt.seid;
		vsi->id = ctxt.vsi_number;
	}

	/* If macvlan filters already exist, force them to get loaded */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		f->changed = true;
		f_count++;
	}
	if (f_count) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		pf->flags |= I40E_FLAG_FILTER_SYNC;
	}

	/* Update VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get vsi bw info, err %d, aq_err %d\n",
			 ret, pf->hw.aq.asq_last_status);
		/* VSI is already added so not tearing that up */
		ret = 0;
	}

err:
	return ret;
}
7261
/**
 * i40e_vsi_release - Delete a VSI and free its resources
 * @vsi: the VSI being removed
 *
 * Tears down the netdev (if registered), MAC/VLAN filters, interrupt
 * vectors, rings and the firmware switch element for @vsi, and finally
 * releases the uplink VEB when no other VSI remains attached to it.
 *
 * Returns 0 on success or < 0 on error
 **/
int i40e_vsi_release(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_veb *veb = NULL;
	struct i40e_pf *pf;
	u16 uplink_seid;
	int i, n;

	pf = vsi->back;

	/* release of a VEB-owner or last VSI is not allowed */
	if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
		dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
			 vsi->seid, vsi->uplink_seid);
		return -ENODEV;
	}
	if (vsi == pf->vsi[pf->lan_vsi] &&
	    !test_bit(__I40E_DOWN, &pf->state)) {
		dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
		return -ENODEV;
	}

	/* remember the uplink before the VSI struct is cleared below */
	uplink_seid = vsi->uplink_seid;
	if (vsi->type != I40E_VSI_SRIOV) {
		if (vsi->netdev_registered) {
			vsi->netdev_registered = false;
			if (vsi->netdev) {
				/* results in a call to i40e_close() */
				unregister_netdev(vsi->netdev);
			}
		} else {
			i40e_vsi_close(vsi);
		}
		i40e_vsi_disable_irq(vsi);
	}

	/* drop every MAC/VLAN filter and push the removals to firmware */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
		i40e_del_filter(vsi, f->macaddr, f->vlan,
				f->is_vf, f->is_netdev);
	i40e_sync_vsi_filters(vsi);

	i40e_vsi_delete(vsi);
	i40e_vsi_free_q_vectors(vsi);
	if (vsi->netdev) {
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
	i40e_vsi_clear_rings(vsi);
	i40e_vsi_clear(vsi);

	/* If this was the last thing on the VEB, except for the
	 * controlling VSI, remove the VEB, which puts the controlling
	 * VSI onto the next level down in the switch.
	 *
	 * Well, okay, there's one more exception here: don't remove
	 * the orphan VEBs yet.  We'll wait for an explicit remove request
	 * from up the network stack.
	 */
	for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] &&
		    pf->vsi[i]->uplink_seid == uplink_seid &&
		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			n++;      /* count the VSIs */
		}
	}
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == uplink_seid)
			n++;     /* count the VEBs */
		if (pf->veb[i]->seid == uplink_seid)
			veb = pf->veb[i];
	}
	if (n == 0 && veb && veb->uplink_seid != 0)
		i40e_veb_release(veb);

	return 0;
}
7346
7347 /**
7348  * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
7349  * @vsi: ptr to the VSI
7350  *
7351  * This should only be called after i40e_vsi_mem_alloc() which allocates the
7352  * corresponding SW VSI structure and initializes num_queue_pairs for the
7353  * newly allocated VSI.
7354  *
7355  * Returns 0 on success or negative on failure
7356  **/
7357 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
7358 {
7359         int ret = -ENOENT;
7360         struct i40e_pf *pf = vsi->back;
7361
7362         if (vsi->q_vectors[0]) {
7363                 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
7364                          vsi->seid);
7365                 return -EEXIST;
7366         }
7367
7368         if (vsi->base_vector) {
7369                 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
7370                          vsi->seid, vsi->base_vector);
7371                 return -EEXIST;
7372         }
7373
7374         ret = i40e_vsi_alloc_q_vectors(vsi);
7375         if (ret) {
7376                 dev_info(&pf->pdev->dev,
7377                          "failed to allocate %d q_vector for VSI %d, ret=%d\n",
7378                          vsi->num_q_vectors, vsi->seid, ret);
7379                 vsi->num_q_vectors = 0;
7380                 goto vector_setup_out;
7381         }
7382
7383         if (vsi->num_q_vectors)
7384                 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
7385                                                  vsi->num_q_vectors, vsi->idx);
7386         if (vsi->base_vector < 0) {
7387                 dev_info(&pf->pdev->dev,
7388                          "failed to get queue tracking for VSI %d, err=%d\n",
7389                          vsi->seid, vsi->base_vector);
7390                 i40e_vsi_free_q_vectors(vsi);
7391                 ret = -ENOENT;
7392                 goto vector_setup_out;
7393         }
7394
7395 vector_setup_out:
7396         return ret;
7397 }
7398
/**
 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
 * @vsi: pointer to the vsi.
 *
 * This re-allocates a vsi's queue resources.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
 **/
static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	u8 enabled_tc;
	int ret;

	/* give back the queues held by this VSI and drop its rings */
	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_vsi_clear_rings(vsi);

	/* rebuild the ring/vector arrays with a fresh ring count */
	i40e_vsi_free_arrays(vsi, false);
	i40e_set_num_rings_in_vsi(vsi);
	ret = i40e_vsi_alloc_arrays(vsi, false);
	if (ret)
		goto err_vsi;

	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
			 vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* Update the FW view of the VSI. Force a reset of TC and queue
	 * layout configurations.
	 */
	enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
	pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
	pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
	i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);

	/* assign it some queues */
	ret = i40e_alloc_rings(vsi);
	if (ret)
		goto err_rings;

	/* map all of the rings to the q_vectors */
	i40e_vsi_map_rings_to_vectors(vsi);
	return vsi;

err_rings:
	/* frees the q_vectors the VSI held before re-init began */
	i40e_vsi_free_q_vectors(vsi);
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
	return NULL;
}
7461
/**
 * i40e_vsi_setup - Set up a VSI by a given type
 * @pf: board private structure
 * @type: VSI type
 * @uplink_seid: the switch element to link to
 * @param1: usage depends upon VSI type. For VF types, indicates VF id
 *
 * This allocates the sw VSI structure and its queue resources, then add a VSI
 * to the identified VEB.
 *
 * Returns pointer to the successfully allocated and configure VSI sw struct on
 * success, otherwise returns NULL on failure.
 **/
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
				u16 uplink_seid, u32 param1)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_veb *veb = NULL;
	int ret, i;
	int v_idx;

	/* The requested uplink_seid must be either
	 *     - the PF's port seid
	 *              no VEB is needed because this is the PF
	 *              or this is a Flow Director special case VSI
	 *     - seid of an existing VEB
	 *     - seid of a VSI that owns an existing VEB
	 *     - seid of a VSI that doesn't own a VEB
	 *              a new VEB is created and the VSI becomes the owner
	 *     - seid of the PF VSI, which is what creates the first VEB
	 *              this is a special case of the previous
	 *
	 * Find which uplink_seid we were given and create a new VEB if needed
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
			veb = pf->veb[i];
			break;
		}
	}

	if (!veb && uplink_seid != pf->mac_seid) {

		/* the uplink must then be a VSI; find it */
		for (i = 0; i < pf->num_alloc_vsi; i++) {
			if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
				vsi = pf->vsi[i];
				break;
			}
		}
		if (!vsi) {
			dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
				 uplink_seid);
			return NULL;
		}

		/* create a VEB under that VSI if it doesn't own one yet */
		if (vsi->uplink_seid == pf->mac_seid)
			veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);

		for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
			if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
				veb = pf->veb[i];
		}
		if (!veb) {
			dev_info(&pf->pdev->dev, "couldn't add VEB\n");
			return NULL;
		}

		vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
		uplink_seid = veb->seid;
	}

	/* get vsi sw struct */
	v_idx = i40e_vsi_mem_alloc(pf, type);
	if (v_idx < 0)
		goto err_alloc;
	vsi = pf->vsi[v_idx];
	if (!vsi)
		goto err_alloc;
	vsi->type = type;
	vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);

	if (type == I40E_VSI_MAIN)
		pf->lan_vsi = v_idx;
	else if (type == I40E_VSI_SRIOV)
		vsi->vf_id = param1;
	/* assign it some queues */
	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
				vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
			 vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* get a VSI from the hardware */
	vsi->uplink_seid = uplink_seid;
	ret = i40e_add_vsi(vsi);
	if (ret)
		goto err_vsi;

	switch (vsi->type) {
	/* setup the netdev if needed */
	case I40E_VSI_MAIN:
	case I40E_VSI_VMDQ2:
		ret = i40e_config_netdev(vsi);
		if (ret)
			goto err_netdev;
		ret = register_netdev(vsi->netdev);
		if (ret)
			goto err_netdev;
		vsi->netdev_registered = true;
		netif_carrier_off(vsi->netdev);
#ifdef CONFIG_I40E_DCB
		/* Setup DCB netlink interface */
		i40e_dcbnl_setup(vsi);
#endif /* CONFIG_I40E_DCB */
		/* fall through */

	case I40E_VSI_FDIR:
		/* set up vectors and rings if needed */
		ret = i40e_vsi_setup_vectors(vsi);
		if (ret)
			goto err_msix;

		ret = i40e_alloc_rings(vsi);
		if (ret)
			goto err_rings;

		/* map all of the rings to the q_vectors */
		i40e_vsi_map_rings_to_vectors(vsi);

		i40e_vsi_reset_stats(vsi);
		break;

	default:
		/* no netdev or rings for the other VSI types */
		break;
	}

	return vsi;

err_rings:
	i40e_vsi_free_q_vectors(vsi);
err_msix:
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
err_netdev:
	/* remove the FW switch element added by i40e_add_vsi() */
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
err_alloc:
	return NULL;
}
7624
7625 /**
7626  * i40e_veb_get_bw_info - Query VEB BW information
7627  * @veb: the veb to query
7628  *
7629  * Query the Tx scheduler BW configuration data for given VEB
7630  **/
7631 static int i40e_veb_get_bw_info(struct i40e_veb *veb)
7632 {
7633         struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
7634         struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
7635         struct i40e_pf *pf = veb->pf;
7636         struct i40e_hw *hw = &pf->hw;
7637         u32 tc_bw_max;
7638         int ret = 0;
7639         int i;
7640
7641         ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
7642                                                   &bw_data, NULL);
7643         if (ret) {
7644                 dev_info(&pf->pdev->dev,
7645                          "query veb bw config failed, aq_err=%d\n",
7646                          hw->aq.asq_last_status);
7647                 goto out;
7648         }
7649
7650         ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
7651                                                    &ets_data, NULL);
7652         if (ret) {
7653                 dev_info(&pf->pdev->dev,
7654                          "query veb bw ets config failed, aq_err=%d\n",
7655                          hw->aq.asq_last_status);
7656                 goto out;
7657         }
7658
7659         veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
7660         veb->bw_max_quanta = ets_data.tc_bw_max;
7661         veb->is_abs_credits = bw_data.absolute_credits_enable;
7662         tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
7663                     (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
7664         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
7665                 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
7666                 veb->bw_tc_limit_credits[i] =
7667                                         le16_to_cpu(bw_data.tc_bw_limits[i]);
7668                 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
7669         }
7670
7671 out:
7672         return ret;
7673 }
7674
7675 /**
7676  * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
7677  * @pf: board private structure
7678  *
7679  * On error: returns error code (negative)
7680  * On success: returns vsi index in PF (positive)
7681  **/
7682 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
7683 {
7684         int ret = -ENOENT;
7685         struct i40e_veb *veb;
7686         int i;
7687
7688         /* Need to protect the allocation of switch elements at the PF level */
7689         mutex_lock(&pf->switch_mutex);
7690
7691         /* VEB list may be fragmented if VEB creation/destruction has
7692          * been happening.  We can afford to do a quick scan to look
7693          * for any free slots in the list.
7694          *
7695          * find next empty veb slot, looping back around if necessary
7696          */
7697         i = 0;
7698         while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
7699                 i++;
7700         if (i >= I40E_MAX_VEB) {
7701                 ret = -ENOMEM;
7702                 goto err_alloc_veb;  /* out of VEB slots! */
7703         }
7704
7705         veb = kzalloc(sizeof(*veb), GFP_KERNEL);
7706         if (!veb) {
7707                 ret = -ENOMEM;
7708                 goto err_alloc_veb;
7709         }
7710         veb->pf = pf;
7711         veb->idx = i;
7712         veb->enabled_tc = 1;
7713
7714         pf->veb[i] = veb;
7715         ret = i;
7716 err_alloc_veb:
7717         mutex_unlock(&pf->switch_mutex);
7718         return ret;
7719 }
7720
/**
 * i40e_switch_branch_release - Delete a branch of the switch tree
 * @branch: where to start deleting
 *
 * This uses recursion to find the tips of the branch to be
 * removed, deleting until we get back to and can delete this VEB.
 **/
static void i40e_switch_branch_release(struct i40e_veb *branch)
{
	struct i40e_pf *pf = branch->pf;
	/* cache seid/idx: *branch may be freed while VSIs are released */
	u16 branch_seid = branch->seid;
	u16 veb_idx = branch->idx;
	int i;

	/* release any VEBs on this VEB - RECURSION */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == branch->seid)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Release the VSIs on this VEB, but not the owner VSI.
	 *
	 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
	 *       the VEB itself, so don't use (*branch) after this loop.
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (!pf->vsi[i])
			continue;
		if (pf->vsi[i]->uplink_seid == branch_seid &&
		   (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			i40e_vsi_release(pf->vsi[i]);
		}
	}

	/* There's one corner case where the VEB might not have been
	 * removed, so double check it here and remove it if needed.
	 * This case happens if the veb was created from the debugfs
	 * commands and no VSIs were added to it.
	 */
	if (pf->veb[veb_idx])
		i40e_veb_release(pf->veb[veb_idx]);
}
7765
7766 /**
7767  * i40e_veb_clear - remove veb struct
7768  * @veb: the veb to remove
7769  **/
7770 static void i40e_veb_clear(struct i40e_veb *veb)
7771 {
7772         if (!veb)
7773                 return;
7774
7775         if (veb->pf) {
7776                 struct i40e_pf *pf = veb->pf;
7777
7778                 mutex_lock(&pf->switch_mutex);
7779                 if (pf->veb[veb->idx] == veb)
7780                         pf->veb[veb->idx] = NULL;
7781                 mutex_unlock(&pf->switch_mutex);
7782         }
7783
7784         kfree(veb);
7785 }
7786
/**
 * i40e_veb_release - Delete a VEB and free its resources
 * @veb: the VEB being removed
 *
 * Only proceeds when exactly one VSI (the owner) remains on the VEB;
 * that VSI is re-attached to the VEB's uplink before the VEB's switch
 * element is deleted and the sw struct freed.
 **/
void i40e_veb_release(struct i40e_veb *veb)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_pf *pf;
	int i, n = 0;

	pf = veb->pf;

	/* find the remaining VSI and check for extras */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
			n++;
			vsi = pf->vsi[i];
		}
	}
	if (n != 1) {
		dev_info(&pf->pdev->dev,
			 "can't remove VEB %d with %d VSIs left\n",
			 veb->seid, n);
		return;
	}

	/* move the remaining VSI to uplink veb */
	vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
	if (veb->uplink_seid) {
		vsi->uplink_seid = veb->uplink_seid;
		if (veb->uplink_seid == pf->mac_seid)
			vsi->veb_idx = I40E_NO_VEB;
		else
			vsi->veb_idx = veb->veb_idx;
	} else {
		/* floating VEB */
		vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
		vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
	}

	i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
	i40e_veb_clear(veb);
}
7830
/**
 * i40e_add_veb - create the VEB in the switch
 * @veb: the VEB to be instantiated
 * @vsi: the controlling VSI
 *
 * Issues the add-VEB AQ command, fetches the statistics counter index
 * and BW info, and links @vsi to the new VEB as its owner.
 *
 * Returns 0 on success, -EPERM/-ENOENT on failure.
 **/
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
	bool is_default = false;
	bool is_cloud = false;
	int ret;

	/* get a VEB from the hardware */
	ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
			      veb->enabled_tc, is_default,
			      is_cloud, &veb->seid, NULL);
	if (ret) {
		dev_info(&veb->pf->pdev->dev,
			 "couldn't add VEB, err %d, aq_err %d\n",
			 ret, veb->pf->hw.aq.asq_last_status);
		return -EPERM;
	}

	/* get statistics counter */
	ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL,
					 &veb->stats_idx, NULL, NULL, NULL);
	if (ret) {
		dev_info(&veb->pf->pdev->dev,
			 "couldn't get VEB statistics idx, err %d, aq_err %d\n",
			 ret, veb->pf->hw.aq.asq_last_status);
		return -EPERM;
	}
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&veb->pf->pdev->dev,
			 "couldn't get VEB bw info, err %d, aq_err %d\n",
			 ret, veb->pf->hw.aq.asq_last_status);
		/* roll back the FW element we just created */
		i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL);
		return -ENOENT;
	}

	/* hand ownership of the new VEB to the controlling VSI */
	vsi->uplink_seid = veb->seid;
	vsi->veb_idx = veb->idx;
	vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;

	return 0;
}
7877
7878 /**
7879  * i40e_veb_setup - Set up a VEB
7880  * @pf: board private structure
7881  * @flags: VEB setup flags
7882  * @uplink_seid: the switch element to link to
7883  * @vsi_seid: the initial VSI seid
7884  * @enabled_tc: Enabled TC bit-map
7885  *
7886  * This allocates the sw VEB structure and links it into the switch
7887  * It is possible and legal for this to be a duplicate of an already
7888  * existing VEB.  It is also possible for both uplink and vsi seids
7889  * to be zero, in order to create a floating VEB.
7890  *
7891  * Returns pointer to the successfully allocated VEB sw struct on
7892  * success, otherwise returns NULL on failure.
7893  **/
7894 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
7895                                 u16 uplink_seid, u16 vsi_seid,
7896                                 u8 enabled_tc)
7897 {
7898         struct i40e_veb *veb, *uplink_veb = NULL;
7899         int vsi_idx, veb_idx;
7900         int ret;
7901
7902         /* if one seid is 0, the other must be 0 to create a floating relay */
7903         if ((uplink_seid == 0 || vsi_seid == 0) &&
7904             (uplink_seid + vsi_seid != 0)) {
7905                 dev_info(&pf->pdev->dev,
7906                          "one, not both seid's are 0: uplink=%d vsi=%d\n",
7907                          uplink_seid, vsi_seid);
7908                 return NULL;
7909         }
7910
7911         /* make sure there is such a vsi and uplink */
7912         for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
7913                 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
7914                         break;
7915         if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
7916                 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
7917                          vsi_seid);
7918                 return NULL;
7919         }
7920
7921         if (uplink_seid && uplink_seid != pf->mac_seid) {
7922                 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
7923                         if (pf->veb[veb_idx] &&
7924                             pf->veb[veb_idx]->seid == uplink_seid) {
7925                                 uplink_veb = pf->veb[veb_idx];
7926                                 break;
7927                         }
7928                 }
7929                 if (!uplink_veb) {
7930                         dev_info(&pf->pdev->dev,
7931                                  "uplink seid %d not found\n", uplink_seid);
7932                         return NULL;
7933                 }
7934         }
7935
7936         /* get veb sw struct */
7937         veb_idx = i40e_veb_mem_alloc(pf);
7938         if (veb_idx < 0)
7939                 goto err_alloc;
7940         veb = pf->veb[veb_idx];
7941         veb->flags = flags;
7942         veb->uplink_seid = uplink_seid;
7943         veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
7944         veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
7945
7946         /* create the VEB in the switch */
7947         ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
7948         if (ret)
7949                 goto err_veb;
7950         if (vsi_idx == pf->lan_vsi)
7951                 pf->lan_veb = veb->idx;
7952
7953         return veb;
7954
7955 err_veb:
7956         i40e_veb_clear(veb);
7957 err_alloc:
7958         return NULL;
7959 }
7960
7961 /**
7962  * i40e_setup_pf_switch_element - set pf vars based on switch type
7963  * @pf: board private structure
7964  * @ele: element we are building info from
7965  * @num_reported: total number of elements
7966  * @printconfig: should we print the contents
7967  *
7968  * helper function to assist in extracting a few useful SEID values.
7969  **/
7970 static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
7971                                 struct i40e_aqc_switch_config_element_resp *ele,
7972                                 u16 num_reported, bool printconfig)
7973 {
7974         u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
7975         u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
7976         u8 element_type = ele->element_type;
7977         u16 seid = le16_to_cpu(ele->seid);
7978
7979         if (printconfig)
7980                 dev_info(&pf->pdev->dev,
7981                          "type=%d seid=%d uplink=%d downlink=%d\n",
7982                          element_type, seid, uplink_seid, downlink_seid);
7983
7984         switch (element_type) {
7985         case I40E_SWITCH_ELEMENT_TYPE_MAC:
7986                 pf->mac_seid = seid;
7987                 break;
7988         case I40E_SWITCH_ELEMENT_TYPE_VEB:
7989                 /* Main VEB? */
7990                 if (uplink_seid != pf->mac_seid)
7991                         break;
7992                 if (pf->lan_veb == I40E_NO_VEB) {
7993                         int v;
7994
7995                         /* find existing or else empty VEB */
7996                         for (v = 0; v < I40E_MAX_VEB; v++) {
7997                                 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
7998                                         pf->lan_veb = v;
7999                                         break;
8000                                 }
8001                         }
8002                         if (pf->lan_veb == I40E_NO_VEB) {
8003                                 v = i40e_veb_mem_alloc(pf);
8004                                 if (v < 0)
8005                                         break;
8006                                 pf->lan_veb = v;
8007                         }
8008                 }
8009
8010                 pf->veb[pf->lan_veb]->seid = seid;
8011                 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
8012                 pf->veb[pf->lan_veb]->pf = pf;
8013                 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
8014                 break;
8015         case I40E_SWITCH_ELEMENT_TYPE_VSI:
8016                 if (num_reported != 1)
8017                         break;
8018                 /* This is immediately after a reset so we can assume this is
8019                  * the PF's VSI
8020                  */
8021                 pf->mac_seid = uplink_seid;
8022                 pf->pf_seid = downlink_seid;
8023                 pf->main_vsi_seid = seid;
8024                 if (printconfig)
8025                         dev_info(&pf->pdev->dev,
8026                                  "pf_seid=%d main_vsi_seid=%d\n",
8027                                  pf->pf_seid, pf->main_vsi_seid);
8028                 break;
8029         case I40E_SWITCH_ELEMENT_TYPE_PF:
8030         case I40E_SWITCH_ELEMENT_TYPE_VF:
8031         case I40E_SWITCH_ELEMENT_TYPE_EMP:
8032         case I40E_SWITCH_ELEMENT_TYPE_BMC:
8033         case I40E_SWITCH_ELEMENT_TYPE_PE:
8034         case I40E_SWITCH_ELEMENT_TYPE_PA:
8035                 /* ignore these for now */
8036                 break;
8037         default:
8038                 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
8039                          element_type, seid);
8040                 break;
8041         }
8042 }
8043
8044 /**
8045  * i40e_fetch_switch_configuration - Get switch config from firmware
8046  * @pf: board private structure
8047  * @printconfig: should we print the contents
8048  *
8049  * Get the current switch configuration from the device and
8050  * extract a few useful SEID values.
8051  **/
8052 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
8053 {
8054         struct i40e_aqc_get_switch_config_resp *sw_config;
8055         u16 next_seid = 0;
8056         int ret = 0;
8057         u8 *aq_buf;
8058         int i;
8059
8060         aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
8061         if (!aq_buf)
8062                 return -ENOMEM;
8063
8064         sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
8065         do {
8066                 u16 num_reported, num_total;
8067
8068                 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
8069                                                 I40E_AQ_LARGE_BUF,
8070                                                 &next_seid, NULL);
8071                 if (ret) {
8072                         dev_info(&pf->pdev->dev,
8073                                  "get switch config failed %d aq_err=%x\n",
8074                                  ret, pf->hw.aq.asq_last_status);
8075                         kfree(aq_buf);
8076                         return -ENOENT;
8077                 }
8078
8079                 num_reported = le16_to_cpu(sw_config->header.num_reported);
8080                 num_total = le16_to_cpu(sw_config->header.num_total);
8081
8082                 if (printconfig)
8083                         dev_info(&pf->pdev->dev,
8084                                  "header: %d reported %d total\n",
8085                                  num_reported, num_total);
8086
8087                 for (i = 0; i < num_reported; i++) {
8088                         struct i40e_aqc_switch_config_element_resp *ele =
8089                                 &sw_config->element[i];
8090
8091                         i40e_setup_pf_switch_element(pf, ele, num_reported,
8092                                                      printconfig);
8093                 }
8094         } while (next_seid != 0);
8095
8096         kfree(aq_buf);
8097         return ret;
8098 }
8099
8100 /**
8101  * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
8102  * @pf: board private structure
8103  * @reinit: if the Main VSI needs to re-initialized.
8104  *
8105  * Returns 0 on success, negative value on failure
8106  **/
8107 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
8108 {
8109         u32 rxfc = 0, txfc = 0, rxfc_reg;
8110         int ret;
8111
8112         /* find out what's out there already */
8113         ret = i40e_fetch_switch_configuration(pf, false);
8114         if (ret) {
8115                 dev_info(&pf->pdev->dev,
8116                          "couldn't fetch switch config, err %d, aq_err %d\n",
8117                          ret, pf->hw.aq.asq_last_status);
8118                 return ret;
8119         }
8120         i40e_pf_reset_stats(pf);
8121
8122         /* first time setup */
8123         if (pf->lan_vsi == I40E_NO_VSI || reinit) {
8124                 struct i40e_vsi *vsi = NULL;
8125                 u16 uplink_seid;
8126
8127                 /* Set up the PF VSI associated with the PF's main VSI
8128                  * that is already in the HW switch
8129                  */
8130                 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
8131                         uplink_seid = pf->veb[pf->lan_veb]->seid;
8132                 else
8133                         uplink_seid = pf->mac_seid;
8134                 if (pf->lan_vsi == I40E_NO_VSI)
8135                         vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
8136                 else if (reinit)
8137                         vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
8138                 if (!vsi) {
8139                         dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
8140                         i40e_fdir_teardown(pf);
8141                         return -EAGAIN;
8142                 }
8143         } else {
8144                 /* force a reset of TC and queue layout configurations */
8145                 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
8146                 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
8147                 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
8148                 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
8149         }
8150         i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
8151
8152         i40e_fdir_sb_setup(pf);
8153
8154         /* Setup static PF queue filter control settings */
8155         ret = i40e_setup_pf_filter_control(pf);
8156         if (ret) {
8157                 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
8158                          ret);
8159                 /* Failure here should not stop continuing other steps */
8160         }
8161
8162         /* enable RSS in the HW, even for only one queue, as the stack can use
8163          * the hash
8164          */
8165         if ((pf->flags & I40E_FLAG_RSS_ENABLED))
8166                 i40e_config_rss(pf);
8167
8168         /* fill in link information and enable LSE reporting */
8169         i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
8170         i40e_link_event(pf);
8171
8172         /* Initialize user-specific link properties */
8173         pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
8174                                   I40E_AQ_AN_COMPLETED) ? true : false);
8175         /* requested_mode is set in probe or by ethtool */
8176         if (!pf->fc_autoneg_status)
8177                 goto no_autoneg;
8178
8179         if ((pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) &&
8180             (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX))
8181                 pf->hw.fc.current_mode = I40E_FC_FULL;
8182         else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
8183                 pf->hw.fc.current_mode = I40E_FC_TX_PAUSE;
8184         else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
8185                 pf->hw.fc.current_mode = I40E_FC_RX_PAUSE;
8186         else
8187                 pf->hw.fc.current_mode = I40E_FC_NONE;
8188
8189         /* sync the flow control settings with the auto-neg values */
8190         switch (pf->hw.fc.current_mode) {
8191         case I40E_FC_FULL:
8192                 txfc = 1;
8193                 rxfc = 1;
8194                 break;
8195         case I40E_FC_TX_PAUSE:
8196                 txfc = 1;
8197                 rxfc = 0;
8198                 break;
8199         case I40E_FC_RX_PAUSE:
8200                 txfc = 0;
8201                 rxfc = 1;
8202                 break;
8203         case I40E_FC_NONE:
8204         case I40E_FC_DEFAULT:
8205                 txfc = 0;
8206                 rxfc = 0;
8207                 break;
8208         case I40E_FC_PFC:
8209                 /* TBD */
8210                 break;
8211         /* no default case, we have to handle all possibilities here */
8212         }
8213
8214         wr32(&pf->hw, I40E_PRTDCB_FCCFG, txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
8215
8216         rxfc_reg = rd32(&pf->hw, I40E_PRTDCB_MFLCN) &
8217                    ~I40E_PRTDCB_MFLCN_RFCE_MASK;
8218         rxfc_reg |= (rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT);
8219
8220         wr32(&pf->hw, I40E_PRTDCB_MFLCN, rxfc_reg);
8221
8222         goto fc_complete;
8223
8224 no_autoneg:
8225         /* disable L2 flow control, user can turn it on if they wish */
8226         wr32(&pf->hw, I40E_PRTDCB_FCCFG, 0);
8227         wr32(&pf->hw, I40E_PRTDCB_MFLCN, rd32(&pf->hw, I40E_PRTDCB_MFLCN) &
8228                                          ~I40E_PRTDCB_MFLCN_RFCE_MASK);
8229
8230 fc_complete:
8231         i40e_ptp_init(pf);
8232
8233         return ret;
8234 }
8235
8236 /**
8237  * i40e_determine_queue_usage - Work out queue distribution
8238  * @pf: board private structure
8239  **/
8240 static void i40e_determine_queue_usage(struct i40e_pf *pf)
8241 {
8242         int queues_left;
8243
8244         pf->num_lan_qps = 0;
8245
8246         /* Find the max queues to be put into basic use.  We'll always be
8247          * using TC0, whether or not DCB is running, and TC0 will get the
8248          * big RSS set.
8249          */
8250         queues_left = pf->hw.func_caps.num_tx_qp;
8251
8252         if ((queues_left == 1) ||
8253             !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
8254                 /* one qp for PF, no queues for anything else */
8255                 queues_left = 0;
8256                 pf->rss_size = pf->num_lan_qps = 1;
8257
8258                 /* make sure all the fancies are disabled */
8259                 pf->flags &= ~(I40E_FLAG_RSS_ENABLED    |
8260                                I40E_FLAG_FD_SB_ENABLED  |
8261                                I40E_FLAG_FD_ATR_ENABLED |
8262                                I40E_FLAG_DCB_CAPABLE    |
8263                                I40E_FLAG_SRIOV_ENABLED  |
8264                                I40E_FLAG_VMDQ_ENABLED);
8265         } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
8266                                   I40E_FLAG_FD_SB_ENABLED |
8267                                   I40E_FLAG_FD_ATR_ENABLED |
8268                                   I40E_FLAG_DCB_CAPABLE))) {
8269                 /* one qp for PF */
8270                 pf->rss_size = pf->num_lan_qps = 1;
8271                 queues_left -= pf->num_lan_qps;
8272
8273                 pf->flags &= ~(I40E_FLAG_RSS_ENABLED    |
8274                                I40E_FLAG_FD_SB_ENABLED  |
8275                                I40E_FLAG_FD_ATR_ENABLED |
8276                                I40E_FLAG_DCB_ENABLED    |
8277                                I40E_FLAG_VMDQ_ENABLED);
8278         } else {
8279                 /* Not enough queues for all TCs */
8280                 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
8281                     (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
8282                         pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
8283                         dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
8284                 }
8285                 pf->num_lan_qps = pf->rss_size_max;
8286                 queues_left -= pf->num_lan_qps;
8287         }
8288
8289         if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
8290                 if (queues_left > 1) {
8291                         queues_left -= 1; /* save 1 queue for FD */
8292                 } else {
8293                         pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
8294                         dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
8295                 }
8296         }
8297
8298         if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
8299             pf->num_vf_qps && pf->num_req_vfs && queues_left) {
8300                 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
8301                                         (queues_left / pf->num_vf_qps));
8302                 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
8303         }
8304
8305         if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
8306             pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
8307                 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
8308                                           (queues_left / pf->num_vmdq_qps));
8309                 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
8310         }
8311
8312         pf->queues_left = queues_left;
8313 }
8314
8315 /**
8316  * i40e_setup_pf_filter_control - Setup PF static filter control
8317  * @pf: PF to be setup
8318  *
8319  * i40e_setup_pf_filter_control sets up a pf's initial filter control
8320  * settings. If PE/FCoE are enabled then it will also set the per PF
8321  * based filter sizes required for them. It also enables Flow director,
8322  * ethertype and macvlan type filter settings for the pf.
8323  *
8324  * Returns 0 on success, negative on failure
8325  **/
8326 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
8327 {
8328         struct i40e_filter_control_settings *settings = &pf->filter_settings;
8329
8330         settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
8331
8332         /* Flow Director is enabled */
8333         if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
8334                 settings->enable_fdir = true;
8335
8336         /* Ethtype and MACVLAN filters enabled for PF */
8337         settings->enable_ethtype = true;
8338         settings->enable_macvlan = true;
8339
8340         if (i40e_set_filter_control(&pf->hw, settings))
8341                 return -ENOENT;
8342
8343         return 0;
8344 }
8345
8346 #define INFO_STRING_LEN 255
8347 static void i40e_print_features(struct i40e_pf *pf)
8348 {
8349         struct i40e_hw *hw = &pf->hw;
8350         char *buf, *string;
8351
8352         string = kzalloc(INFO_STRING_LEN, GFP_KERNEL);
8353         if (!string) {
8354                 dev_err(&pf->pdev->dev, "Features string allocation failed\n");
8355                 return;
8356         }
8357
8358         buf = string;
8359
8360         buf += sprintf(string, "Features: PF-id[%d] ", hw->pf_id);
8361 #ifdef CONFIG_PCI_IOV
8362         buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs);
8363 #endif
8364         buf += sprintf(buf, "VSIs: %d QP: %d ", pf->hw.func_caps.num_vsis,
8365                        pf->vsi[pf->lan_vsi]->num_queue_pairs);
8366
8367         if (pf->flags & I40E_FLAG_RSS_ENABLED)
8368                 buf += sprintf(buf, "RSS ");
8369         if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
8370                 buf += sprintf(buf, "FD_ATR ");
8371         if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
8372                 buf += sprintf(buf, "FD_SB ");
8373                 buf += sprintf(buf, "NTUPLE ");
8374         }
8375         if (pf->flags & I40E_FLAG_DCB_CAPABLE)
8376                 buf += sprintf(buf, "DCB ");
8377         if (pf->flags & I40E_FLAG_PTP)
8378                 buf += sprintf(buf, "PTP ");
8379
8380         BUG_ON(buf > (string + INFO_STRING_LEN));
8381         dev_info(&pf->pdev->dev, "%s\n", string);
8382         kfree(string);
8383 }
8384
8385 /**
8386  * i40e_probe - Device initialization routine
8387  * @pdev: PCI device information struct
8388  * @ent: entry in i40e_pci_tbl
8389  *
8390  * i40e_probe initializes a pf identified by a pci_dev structure.
8391  * The OS initialization, configuring of the pf private structure,
8392  * and a hardware reset occur.
8393  *
8394  * Returns 0 on success, negative on failure
8395  **/
8396 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8397 {
8398         struct i40e_pf *pf;
8399         struct i40e_hw *hw;
8400         static u16 pfs_found;
8401         u16 link_status;
8402         int err = 0;
8403         u32 len;
8404         u32 i;
8405
8406         err = pci_enable_device_mem(pdev);
8407         if (err)
8408                 return err;
8409
8410         /* set up for high or low dma */
8411         err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
8412         if (err) {
8413                 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8414                 if (err) {
8415                         dev_err(&pdev->dev,
8416                                 "DMA configuration failed: 0x%x\n", err);
8417                         goto err_dma;
8418                 }
8419         }
8420
8421         /* set up pci connections */
8422         err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
8423                                            IORESOURCE_MEM), i40e_driver_name);
8424         if (err) {
8425                 dev_info(&pdev->dev,
8426                          "pci_request_selected_regions failed %d\n", err);
8427                 goto err_pci_reg;
8428         }
8429
8430         pci_enable_pcie_error_reporting(pdev);
8431         pci_set_master(pdev);
8432
8433         /* Now that we have a PCI connection, we need to do the
8434          * low level device setup.  This is primarily setting up
8435          * the Admin Queue structures and then querying for the
8436          * device's current profile information.
8437          */
8438         pf = kzalloc(sizeof(*pf), GFP_KERNEL);
8439         if (!pf) {
8440                 err = -ENOMEM;
8441                 goto err_pf_alloc;
8442         }
8443         pf->next_vsi = 0;
8444         pf->pdev = pdev;
8445         set_bit(__I40E_DOWN, &pf->state);
8446
8447         hw = &pf->hw;
8448         hw->back = pf;
8449         hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
8450                               pci_resource_len(pdev, 0));
8451         if (!hw->hw_addr) {
8452                 err = -EIO;
8453                 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
8454                          (unsigned int)pci_resource_start(pdev, 0),
8455                          (unsigned int)pci_resource_len(pdev, 0), err);
8456                 goto err_ioremap;
8457         }
8458         hw->vendor_id = pdev->vendor;
8459         hw->device_id = pdev->device;
8460         pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
8461         hw->subsystem_vendor_id = pdev->subsystem_vendor;
8462         hw->subsystem_device_id = pdev->subsystem_device;
8463         hw->bus.device = PCI_SLOT(pdev->devfn);
8464         hw->bus.func = PCI_FUNC(pdev->devfn);
8465         pf->instance = pfs_found;
8466
8467         /* do a special CORER for clearing PXE mode once at init */
8468         if (hw->revision_id == 0 &&
8469             (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
8470                 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
8471                 i40e_flush(hw);
8472                 msleep(200);
8473                 pf->corer_count++;
8474
8475                 i40e_clear_pxe_mode(hw);
8476         }
8477
8478         /* Reset here to make sure all is clean and to define PF 'n' */
8479         err = i40e_pf_reset(hw);
8480         if (err) {
8481                 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
8482                 goto err_pf_reset;
8483         }
8484         pf->pfr_count++;
8485
8486         hw->aq.num_arq_entries = I40E_AQ_LEN;
8487         hw->aq.num_asq_entries = I40E_AQ_LEN;
8488         hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
8489         hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
8490         pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
8491         snprintf(pf->misc_int_name, sizeof(pf->misc_int_name) - 1,
8492                  "%s-pf%d:misc",
8493                  dev_driver_string(&pf->pdev->dev), pf->hw.pf_id);
8494
8495         err = i40e_init_shared_code(hw);
8496         if (err) {
8497                 dev_info(&pdev->dev, "init_shared_code failed: %d\n", err);
8498                 goto err_pf_reset;
8499         }
8500
8501         /* set up a default setting for link flow control */
8502         pf->hw.fc.requested_mode = I40E_FC_NONE;
8503
8504         err = i40e_init_adminq(hw);
8505         dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
8506         if (err) {
8507                 dev_info(&pdev->dev,
8508                          "init_adminq failed: %d expecting API %02x.%02x\n",
8509                          err,
8510                          I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR);
8511                 goto err_pf_reset;
8512         }
8513
8514         i40e_verify_eeprom(pf);
8515
8516         /* Rev 0 hardware was never productized */
8517         if (hw->revision_id < 1)
8518                 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
8519
8520         i40e_clear_pxe_mode(hw);
8521         err = i40e_get_capabilities(pf);
8522         if (err)
8523                 goto err_adminq_setup;
8524
8525         err = i40e_sw_init(pf);
8526         if (err) {
8527                 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
8528                 goto err_sw_init;
8529         }
8530
8531         err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
8532                                 hw->func_caps.num_rx_qp,
8533                                 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
8534         if (err) {
8535                 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
8536                 goto err_init_lan_hmc;
8537         }
8538
8539         err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
8540         if (err) {
8541                 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
8542                 err = -ENOENT;
8543                 goto err_configure_lan_hmc;
8544         }
8545
8546         i40e_get_mac_addr(hw, hw->mac.addr);
8547         if (!is_valid_ether_addr(hw->mac.addr)) {
8548                 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
8549                 err = -EIO;
8550                 goto err_mac_addr;
8551         }
8552         dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
8553         ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
8554
8555         pci_set_drvdata(pdev, pf);
8556         pci_save_state(pdev);
8557 #ifdef CONFIG_I40E_DCB
8558         err = i40e_init_pf_dcb(pf);
8559         if (err) {
8560                 dev_info(&pdev->dev, "init_pf_dcb failed: %d\n", err);
8561                 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
8562                 /* Continue without DCB enabled */
8563         }
8564 #endif /* CONFIG_I40E_DCB */
8565
8566         /* set up periodic task facility */
8567         setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
8568         pf->service_timer_period = HZ;
8569
8570         INIT_WORK(&pf->service_task, i40e_service_task);
8571         clear_bit(__I40E_SERVICE_SCHED, &pf->state);
8572         pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
8573         pf->link_check_timeout = jiffies;
8574
8575         /* WoL defaults to disabled */
8576         pf->wol_en = false;
8577         device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
8578
8579         /* set up the main switch operations */
8580         i40e_determine_queue_usage(pf);
8581         i40e_init_interrupt_scheme(pf);
8582
8583         /* The number of VSIs reported by the FW is the minimum guaranteed
8584          * to us; HW supports far more and we share the remaining pool with
8585          * the other PFs. We allocate space for more than the guarantee with
8586          * the understanding that we might not get them all later.
8587          */
8588         if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
8589                 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
8590         else
8591                 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
8592
8593         /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
8594         len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi;
8595         pf->vsi = kzalloc(len, GFP_KERNEL);
8596         if (!pf->vsi) {
8597                 err = -ENOMEM;
8598                 goto err_switch_setup;
8599         }
8600
8601         err = i40e_setup_pf_switch(pf, false);
8602         if (err) {
8603                 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
8604                 goto err_vsis;
8605         }
8606         /* if FDIR VSI was set up, start it now */
8607         for (i = 0; i < pf->num_alloc_vsi; i++) {
8608                 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
8609                         i40e_vsi_open(pf->vsi[i]);
8610                         break;
8611                 }
8612         }
8613
8614         /* The main driver is (mostly) up and happy. We need to set this state
8615          * before setting up the misc vector or we get a race and the vector
8616          * ends up disabled forever.
8617          */
8618         clear_bit(__I40E_DOWN, &pf->state);
8619
8620         /* In case of MSIX we are going to setup the misc vector right here
8621          * to handle admin queue events etc. In case of legacy and MSI
8622          * the misc functionality and queue processing is combined in
8623          * the same vector and that gets setup at open.
8624          */
8625         if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
8626                 err = i40e_setup_misc_vector(pf);
8627                 if (err) {
8628                         dev_info(&pdev->dev,
8629                                  "setup of misc vector failed: %d\n", err);
8630                         goto err_vsis;
8631                 }
8632         }
8633
8634 #ifdef CONFIG_PCI_IOV
8635         /* prep for VF support */
8636         if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
8637             (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
8638             !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
8639                 u32 val;
8640
8641                 /* disable link interrupts for VFs */
8642                 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
8643                 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
8644                 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
8645                 i40e_flush(hw);
8646
8647                 if (pci_num_vf(pdev)) {
8648                         dev_info(&pdev->dev,
8649                                  "Active VFs found, allocating resources.\n");
8650                         err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
8651                         if (err)
8652                                 dev_info(&pdev->dev,
8653                                          "Error %d allocating resources for existing VFs\n",
8654                                          err);
8655                 }
8656         }
8657 #endif /* CONFIG_PCI_IOV */
8658
8659         pfs_found++;
8660
8661         i40e_dbg_pf_init(pf);
8662
8663         /* tell the firmware that we're starting */
8664         i40e_send_version(pf);
8665
8666         /* since everything's happy, start the service_task timer */
8667         mod_timer(&pf->service_timer,
8668                   round_jiffies(jiffies + pf->service_timer_period));
8669
8670         /* Get the negotiated link width and speed from PCI config space */
8671         pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status);
8672
8673         i40e_set_pci_config_data(hw, link_status);
8674
8675         dev_info(&pdev->dev, "PCI-Express: %s %s\n",
8676                 (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" :
8677                  hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" :
8678                  hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" :
8679                  "Unknown"),
8680                 (hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" :
8681                  hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" :
8682                  hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" :
8683                  hw->bus.width == i40e_bus_width_pcie_x1 ? "Width x1" :
8684                  "Unknown"));
8685
8686         if (hw->bus.width < i40e_bus_width_pcie_x8 ||
8687             hw->bus.speed < i40e_bus_speed_8000) {
8688                 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
8689                 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
8690         }
8691
8692         /* print a string summarizing features */
8693         i40e_print_features(pf);
8694
8695         return 0;
8696
8697         /* Unwind what we've done if something failed in the setup */
8698 err_vsis:
8699         set_bit(__I40E_DOWN, &pf->state);
8700         i40e_clear_interrupt_scheme(pf);
8701         kfree(pf->vsi);
8702 err_switch_setup:
8703         i40e_reset_interrupt_capability(pf);
8704         del_timer_sync(&pf->service_timer);
8705 err_mac_addr:
8706 err_configure_lan_hmc:
8707         (void)i40e_shutdown_lan_hmc(hw);
8708 err_init_lan_hmc:
8709         kfree(pf->qp_pile);
8710         kfree(pf->irq_pile);
8711 err_sw_init:
8712 err_adminq_setup:
8713         (void)i40e_shutdown_adminq(hw);
8714 err_pf_reset:
8715         iounmap(hw->hw_addr);
8716 err_ioremap:
8717         kfree(pf);
8718 err_pf_alloc:
8719         pci_disable_pcie_error_reporting(pdev);
8720         pci_release_selected_regions(pdev,
8721                                      pci_select_bars(pdev, IORESOURCE_MEM));
8722 err_pci_reg:
8723 err_dma:
8724         pci_disable_device(pdev);
8725         return err;
8726 }
8727
8728 /**
8729  * i40e_remove - Device removal routine
8730  * @pdev: PCI device information struct
8731  *
8732  * i40e_remove is called by the PCI subsystem to alert the driver
8733  * that is should release a PCI device.  This could be caused by a
8734  * Hot-Plug event, or because the driver is going to be removed from
8735  * memory.
8736  **/
8737 static void i40e_remove(struct pci_dev *pdev)
8738 {
8739         struct i40e_pf *pf = pci_get_drvdata(pdev);
8740         i40e_status ret_code;
8741         u32 reg;
8742         int i;
8743
8744         i40e_dbg_pf_exit(pf);
8745
8746         i40e_ptp_stop(pf);
8747
8748         /* no more scheduling of any task */
8749         set_bit(__I40E_DOWN, &pf->state);
8750         del_timer_sync(&pf->service_timer);
8751         cancel_work_sync(&pf->service_task);
8752
8753         if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
8754                 i40e_free_vfs(pf);
8755                 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
8756         }
8757
8758         i40e_fdir_teardown(pf);
8759
8760         /* If there is a switch structure or any orphans, remove them.
8761          * This will leave only the PF's VSI remaining.
8762          */
8763         for (i = 0; i < I40E_MAX_VEB; i++) {
8764                 if (!pf->veb[i])
8765                         continue;
8766
8767                 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
8768                     pf->veb[i]->uplink_seid == 0)
8769                         i40e_switch_branch_release(pf->veb[i]);
8770         }
8771
8772         /* Now we can shutdown the PF's VSI, just before we kill
8773          * adminq and hmc.
8774          */
8775         if (pf->vsi[pf->lan_vsi])
8776                 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
8777
8778         i40e_stop_misc_vector(pf);
8779         if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
8780                 synchronize_irq(pf->msix_entries[0].vector);
8781                 free_irq(pf->msix_entries[0].vector, pf);
8782         }
8783
8784         /* shutdown and destroy the HMC */
8785         if (pf->hw.hmc.hmc_obj) {
8786                 ret_code = i40e_shutdown_lan_hmc(&pf->hw);
8787                 if (ret_code)
8788                         dev_warn(&pdev->dev,
8789                                  "Failed to destroy the HMC resources: %d\n",
8790                                  ret_code);
8791         }
8792
8793         /* shutdown the adminq */
8794         ret_code = i40e_shutdown_adminq(&pf->hw);
8795         if (ret_code)
8796                 dev_warn(&pdev->dev,
8797                          "Failed to destroy the Admin Queue resources: %d\n",
8798                          ret_code);
8799
8800         /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
8801         i40e_clear_interrupt_scheme(pf);
8802         for (i = 0; i < pf->num_alloc_vsi; i++) {
8803                 if (pf->vsi[i]) {
8804                         i40e_vsi_clear_rings(pf->vsi[i]);
8805                         i40e_vsi_clear(pf->vsi[i]);
8806                         pf->vsi[i] = NULL;
8807                 }
8808         }
8809
8810         for (i = 0; i < I40E_MAX_VEB; i++) {
8811                 kfree(pf->veb[i]);
8812                 pf->veb[i] = NULL;
8813         }
8814
8815         kfree(pf->qp_pile);
8816         kfree(pf->irq_pile);
8817         kfree(pf->vsi);
8818
8819         /* force a PF reset to clean anything leftover */
8820         reg = rd32(&pf->hw, I40E_PFGEN_CTRL);
8821         wr32(&pf->hw, I40E_PFGEN_CTRL, (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
8822         i40e_flush(&pf->hw);
8823
8824         iounmap(pf->hw.hw_addr);
8825         kfree(pf);
8826         pci_release_selected_regions(pdev,
8827                                      pci_select_bars(pdev, IORESOURCE_MEM));
8828
8829         pci_disable_pcie_error_reporting(pdev);
8830         pci_disable_device(pdev);
8831 }
8832
8833 /**
8834  * i40e_pci_error_detected - warning that something funky happened in PCI land
8835  * @pdev: PCI device information struct
8836  *
8837  * Called to warn that something happened and the error handling steps
8838  * are in progress.  Allows the driver to quiesce things, be ready for
8839  * remediation.
8840  **/
8841 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
8842                                                 enum pci_channel_state error)
8843 {
8844         struct i40e_pf *pf = pci_get_drvdata(pdev);
8845
8846         dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
8847
8848         /* shutdown all operations */
8849         if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
8850                 rtnl_lock();
8851                 i40e_prep_for_reset(pf);
8852                 rtnl_unlock();
8853         }
8854
8855         /* Request a slot reset */
8856         return PCI_ERS_RESULT_NEED_RESET;
8857 }
8858
8859 /**
8860  * i40e_pci_error_slot_reset - a PCI slot reset just happened
8861  * @pdev: PCI device information struct
8862  *
8863  * Called to find if the driver can work with the device now that
8864  * the pci slot has been reset.  If a basic connection seems good
8865  * (registers are readable and have sane content) then return a
8866  * happy little PCI_ERS_RESULT_xxx.
8867  **/
8868 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
8869 {
8870         struct i40e_pf *pf = pci_get_drvdata(pdev);
8871         pci_ers_result_t result;
8872         int err;
8873         u32 reg;
8874
8875         dev_info(&pdev->dev, "%s\n", __func__);
8876         if (pci_enable_device_mem(pdev)) {
8877                 dev_info(&pdev->dev,
8878                          "Cannot re-enable PCI device after reset.\n");
8879                 result = PCI_ERS_RESULT_DISCONNECT;
8880         } else {
8881                 pci_set_master(pdev);
8882                 pci_restore_state(pdev);
8883                 pci_save_state(pdev);
8884                 pci_wake_from_d3(pdev, false);
8885
8886                 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
8887                 if (reg == 0)
8888                         result = PCI_ERS_RESULT_RECOVERED;
8889                 else
8890                         result = PCI_ERS_RESULT_DISCONNECT;
8891         }
8892
8893         err = pci_cleanup_aer_uncorrect_error_status(pdev);
8894         if (err) {
8895                 dev_info(&pdev->dev,
8896                          "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8897                          err);
8898                 /* non-fatal, continue */
8899         }
8900
8901         return result;
8902 }
8903
8904 /**
8905  * i40e_pci_error_resume - restart operations after PCI error recovery
8906  * @pdev: PCI device information struct
8907  *
8908  * Called to allow the driver to bring things back up after PCI error
8909  * and/or reset recovery has finished.
8910  **/
8911 static void i40e_pci_error_resume(struct pci_dev *pdev)
8912 {
8913         struct i40e_pf *pf = pci_get_drvdata(pdev);
8914
8915         dev_info(&pdev->dev, "%s\n", __func__);
8916         if (test_bit(__I40E_SUSPENDED, &pf->state))
8917                 return;
8918
8919         rtnl_lock();
8920         i40e_handle_reset_warning(pf);
8921         rtnl_lock();
8922 }
8923
8924 /**
8925  * i40e_shutdown - PCI callback for shutting down
8926  * @pdev: PCI device information struct
8927  **/
8928 static void i40e_shutdown(struct pci_dev *pdev)
8929 {
8930         struct i40e_pf *pf = pci_get_drvdata(pdev);
8931         struct i40e_hw *hw = &pf->hw;
8932
8933         set_bit(__I40E_SUSPENDED, &pf->state);
8934         set_bit(__I40E_DOWN, &pf->state);
8935         rtnl_lock();
8936         i40e_prep_for_reset(pf);
8937         rtnl_unlock();
8938
8939         wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
8940         wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
8941
8942         if (system_state == SYSTEM_POWER_OFF) {
8943                 pci_wake_from_d3(pdev, pf->wol_en);
8944                 pci_set_power_state(pdev, PCI_D3hot);
8945         }
8946 }
8947
8948 #ifdef CONFIG_PM
8949 /**
8950  * i40e_suspend - PCI callback for moving to D3
8951  * @pdev: PCI device information struct
8952  **/
8953 static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
8954 {
8955         struct i40e_pf *pf = pci_get_drvdata(pdev);
8956         struct i40e_hw *hw = &pf->hw;
8957
8958         set_bit(__I40E_SUSPENDED, &pf->state);
8959         set_bit(__I40E_DOWN, &pf->state);
8960         rtnl_lock();
8961         i40e_prep_for_reset(pf);
8962         rtnl_unlock();
8963
8964         wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
8965         wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
8966
8967         pci_wake_from_d3(pdev, pf->wol_en);
8968         pci_set_power_state(pdev, PCI_D3hot);
8969
8970         return 0;
8971 }
8972
8973 /**
8974  * i40e_resume - PCI callback for waking up from D3
8975  * @pdev: PCI device information struct
8976  **/
8977 static int i40e_resume(struct pci_dev *pdev)
8978 {
8979         struct i40e_pf *pf = pci_get_drvdata(pdev);
8980         u32 err;
8981
8982         pci_set_power_state(pdev, PCI_D0);
8983         pci_restore_state(pdev);
8984         /* pci_restore_state() clears dev->state_saves, so
8985          * call pci_save_state() again to restore it.
8986          */
8987         pci_save_state(pdev);
8988
8989         err = pci_enable_device_mem(pdev);
8990         if (err) {
8991                 dev_err(&pdev->dev,
8992                         "%s: Cannot enable PCI device from suspend\n",
8993                         __func__);
8994                 return err;
8995         }
8996         pci_set_master(pdev);
8997
8998         /* no wakeup events while running */
8999         pci_wake_from_d3(pdev, false);
9000
9001         /* handling the reset will rebuild the device state */
9002         if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
9003                 clear_bit(__I40E_DOWN, &pf->state);
9004                 rtnl_lock();
9005                 i40e_reset_and_rebuild(pf, false);
9006                 rtnl_unlock();
9007         }
9008
9009         return 0;
9010 }
9011
9012 #endif
/* PCI AER (Advanced Error Reporting) recovery callbacks */
static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.resume = i40e_pci_error_resume,
};
9018
/* PCI driver glue: probe/remove, power management, shutdown,
 * AER error handling and SR-IOV configuration entry points
 */
static struct pci_driver i40e_driver = {
	.name     = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe    = i40e_probe,
	.remove   = i40e_remove,
#ifdef CONFIG_PM
	.suspend  = i40e_suspend,
	.resume   = i40e_resume,
#endif
	.shutdown = i40e_shutdown,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};
9032
9033 /**
9034  * i40e_init_module - Driver registration routine
9035  *
9036  * i40e_init_module is the first routine called when the driver is
9037  * loaded. All it does is register with the PCI subsystem.
9038  **/
9039 static int __init i40e_init_module(void)
9040 {
9041         pr_info("%s: %s - version %s\n", i40e_driver_name,
9042                 i40e_driver_string, i40e_driver_version_str);
9043         pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
9044         i40e_dbg_init();
9045         return pci_register_driver(&i40e_driver);
9046 }
9047 module_init(i40e_init_module);
9048
9049 /**
9050  * i40e_exit_module - Driver exit cleanup routine
9051  *
9052  * i40e_exit_module is called just before the driver is removed
9053  * from memory.
9054  **/
9055 static void __exit i40e_exit_module(void)
9056 {
9057         pci_unregister_driver(&i40e_driver);
9058         i40e_dbg_exit();
9059 }
9060 module_exit(i40e_exit_module);