i40e: fix panic due to too-early Tx queue enable
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 275ca9a1719ed812e7ee15ac8bfda447ee31e217..4071b4e551ccc086ead84e05ec5b114f6252d89e 100644
@@ -37,9 +37,9 @@ static const char i40e_driver_string[] =
 
 #define DRV_KERN "-k"
 
-#define DRV_VERSION_MAJOR 0
-#define DRV_VERSION_MINOR 4
-#define DRV_VERSION_BUILD 10
+#define DRV_VERSION_MAJOR 1
+#define DRV_VERSION_MINOR 0
+#define DRV_VERSION_BUILD 4
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
             __stringify(DRV_VERSION_BUILD)    DRV_KERN
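
With the new values above, DRV_VERSION now expands to "1.0.4-k". A minimal standalone sketch of the two-step stringification, with __stringify redefined locally only for illustration (in the driver it comes from <linux/stringify.h>):

#include <stdio.h>

#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)	/* expand x first, then stringify it */

#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 0
#define DRV_VERSION_BUILD 4
#define DRV_KERN "-k"
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	    __stringify(DRV_VERSION_MINOR) "." \
	    __stringify(DRV_VERSION_BUILD)    DRV_KERN

int main(void)
{
	printf("%s\n", DRV_VERSION);	/* adjacent string literals concatenate: prints 1.0.4-k */
	return 0;
}
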
@@ -65,7 +65,7 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb);
  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
  *   Class, Class Mask, private data (not used) }
  */
-static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = {
+static const struct pci_device_id i40e_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
@@ -269,7 +269,11 @@ static void i40e_service_event_schedule(struct i40e_pf *pf)
  * device is munged, not just the one netdev port, so go for the full
  * reset.
  **/
+#ifdef I40E_FCOE
+void i40e_tx_timeout(struct net_device *netdev)
+#else
 static void i40e_tx_timeout(struct net_device *netdev)
+#endif
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
@@ -278,7 +282,7 @@ static void i40e_tx_timeout(struct net_device *netdev)
        pf->tx_timeout_count++;
 
        if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
-               pf->tx_timeout_recovery_level = 0;
+               pf->tx_timeout_recovery_level = 1;
        pf->tx_timeout_last_recovery = jiffies;
        netdev_info(netdev, "tx_timeout recovery level %d\n",
                    pf->tx_timeout_recovery_level);
@@ -304,8 +308,8 @@ static void i40e_tx_timeout(struct net_device *netdev)
                break;
        default:
                netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
-               set_bit(__I40E_DOWN, &vsi->state);
-               i40e_down(vsi);
+               set_bit(__I40E_DOWN_REQUESTED, &pf->state);
+               set_bit(__I40E_DOWN_REQUESTED, &vsi->state);
                break;
        }
        i40e_service_event_schedule(pf);
@@ -349,9 +353,15 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
  * Returns the address of the device statistics structure.
  * The statistics are actually updated from the service task.
  **/
+#ifdef I40E_FCOE
+struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
+                                            struct net_device *netdev,
+                                            struct rtnl_link_stats64 *stats)
+#else
 static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
                                             struct net_device *netdev,
                                             struct rtnl_link_stats64 *stats)
+#endif
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_ring *tx_ring, *rx_ring;
@@ -444,9 +454,21 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
  **/
 void i40e_pf_reset_stats(struct i40e_pf *pf)
 {
+       int i;
+
        memset(&pf->stats, 0, sizeof(pf->stats));
        memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
        pf->stat_offsets_loaded = false;
+
+       for (i = 0; i < I40E_MAX_VEB; i++) {
+               if (pf->veb[i]) {
+                       memset(&pf->veb[i]->stats, 0,
+                              sizeof(pf->veb[i]->stats));
+                       memset(&pf->veb[i]->stats_offsets, 0,
+                              sizeof(pf->veb[i]->stats_offsets));
+                       pf->veb[i]->stat_offsets_loaded = false;
+               }
+       }
 }
 
 /**
@@ -624,6 +646,55 @@ static void i40e_update_veb_stats(struct i40e_veb *veb)
        veb->stat_offsets_loaded = true;
 }
 
+#ifdef I40E_FCOE
+/**
+ * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
+ * @vsi: the VSI that is capable of doing FCoE
+ **/
+static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
+{
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       struct i40e_fcoe_stats *ofs;
+       struct i40e_fcoe_stats *fs;     /* device's FCoE stats */
+       int idx;
+
+       if (vsi->type != I40E_VSI_FCOE)
+               return;
+
+       idx = (pf->pf_seid - I40E_BASE_PF_SEID) + I40E_FCOE_PF_STAT_OFFSET;
+       fs = &vsi->fcoe_stats;
+       ofs = &vsi->fcoe_stats_offsets;
+
+       i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
+                          vsi->fcoe_stat_offsets_loaded,
+                          &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
+       i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
+                          vsi->fcoe_stat_offsets_loaded,
+                          &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
+       i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
+                          vsi->fcoe_stat_offsets_loaded,
+                          &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
+       i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
+                          vsi->fcoe_stat_offsets_loaded,
+                          &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
+       i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
+                          vsi->fcoe_stat_offsets_loaded,
+                          &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
+       i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
+                          vsi->fcoe_stat_offsets_loaded,
+                          &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
+       i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
+                          vsi->fcoe_stat_offsets_loaded,
+                          &ofs->fcoe_last_error, &fs->fcoe_last_error);
+       i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
+                          vsi->fcoe_stat_offsets_loaded,
+                          &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);
+
+       vsi->fcoe_stat_offsets_loaded = true;
+}
+
+#endif
 /**
  * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
  * @pf: the corresponding PF
@@ -1052,6 +1123,9 @@ void i40e_update_stats(struct i40e_vsi *vsi)
                i40e_update_pf_stats(pf);
 
        i40e_update_vsi_stats(vsi);
+#ifdef I40E_FCOE
+       i40e_update_fcoe_stats(vsi);
+#endif
 }
 
 /**
@@ -1165,8 +1239,11 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
  * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
  * @vsi: the PF Main VSI - inappropriate for any other VSI
  * @macaddr: the MAC address
+ *
+ * Some older firmware configurations set up a default promiscuous VLAN
+ * filter that needs to be removed.
  **/
-static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
+static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
 {
        struct i40e_aqc_remove_macvlan_element_data element;
        struct i40e_pf *pf = vsi->back;
@@ -1174,15 +1251,18 @@ static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
 
        /* Only appropriate for the PF main VSI */
        if (vsi->type != I40E_VSI_MAIN)
-               return;
+               return -EINVAL;
 
+       memset(&element, 0, sizeof(element));
        ether_addr_copy(element.mac_addr, macaddr);
        element.vlan_tag = 0;
        element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
                        I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
        aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
        if (aq_ret)
-               dev_err(&pf->pdev->dev, "Could not remove default MAC-VLAN\n");
+               return -ENOENT;
+
+       return 0;
 }
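
A hedged sketch of how a caller can now act on the return value instead of relying on the removed dev_err (the real call sites are outside this hunk; "mac" is hypothetical and stands for the NVM-programmed address):

	if (i40e_rm_default_mac_filter(vsi, mac))
		dev_dbg(&vsi->back->pdev->dev,
			"default MAC-VLAN filter was not removed\n");
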
 
 /**
@@ -1303,29 +1383,42 @@ void i40e_del_filter(struct i40e_vsi *vsi,
  *
  * Returns 0 on success, negative on failure
  **/
+#ifdef I40E_FCOE
+int i40e_set_mac(struct net_device *netdev, void *p)
+#else
 static int i40e_set_mac(struct net_device *netdev, void *p)
+#endif
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
        struct sockaddr *addr = p;
        struct i40e_mac_filter *f;
 
        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;
 
-       netdev_info(netdev, "set mac address=%pM\n", addr->sa_data);
-
-       if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
+       if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
+               netdev_info(netdev, "already using mac address %pM\n",
+                           addr->sa_data);
                return 0;
+       }
 
        if (test_bit(__I40E_DOWN, &vsi->back->state) ||
            test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
                return -EADDRNOTAVAIL;
 
+       if (ether_addr_equal(hw->mac.addr, addr->sa_data))
+               netdev_info(netdev, "returning to hw mac address %pM\n",
+                           hw->mac.addr);
+       else
+               netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
+
        if (vsi->type == I40E_VSI_MAIN) {
                i40e_status ret;
                ret = i40e_aq_mac_address_write(&vsi->back->hw,
-                                               I40E_AQC_WRITE_TYPE_LAA_ONLY,
+                                               I40E_AQC_WRITE_TYPE_LAA_WOL,
                                                addr->sa_data, NULL);
                if (ret) {
                        netdev_info(netdev,
@@ -1333,21 +1426,35 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
                                    ret);
                        return -EADDRNOTAVAIL;
                }
+       }
 
-               ether_addr_copy(vsi->back->hw.mac.addr, addr->sa_data);
+       if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) {
+               struct i40e_aqc_remove_macvlan_element_data element;
+
+               memset(&element, 0, sizeof(element));
+               ether_addr_copy(element.mac_addr, netdev->dev_addr);
+               element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+               i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
+       } else {
+               i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
+                               false, false);
        }
 
-       /* In order to be sure to not drop any packets, add the new address
-        * then delete the old one.
-        */
-       f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, false, false);
-       if (!f)
-               return -ENOMEM;
+       if (ether_addr_equal(addr->sa_data, hw->mac.addr)) {
+               struct i40e_aqc_add_macvlan_element_data element;
 
-       i40e_sync_vsi_filters(vsi);
-       i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false);
-       i40e_sync_vsi_filters(vsi);
+               memset(&element, 0, sizeof(element));
+               ether_addr_copy(element.mac_addr, hw->mac.addr);
+               element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
+               i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
+       } else {
+               f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
+                                   false, false);
+               if (f)
+                       f->is_laa = true;
+       }
 
+       i40e_sync_vsi_filters(vsi);
        ether_addr_copy(netdev->dev_addr, addr->sa_data);
 
        return 0;
@@ -1362,10 +1469,17 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
  *
  * Setup VSI queue mapping for enabled traffic classes.
  **/
+#ifdef I40E_FCOE
+void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
+                             struct i40e_vsi_context *ctxt,
+                             u8 enabled_tc,
+                             bool is_add)
+#else
 static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
                                     struct i40e_vsi_context *ctxt,
                                     u8 enabled_tc,
                                     bool is_add)
+#endif
 {
        struct i40e_pf *pf = vsi->back;
        u16 sections = 0;
@@ -1411,6 +1525,11 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
                        case I40E_VSI_MAIN:
                                qcount = min_t(int, pf->rss_size, num_tc_qps);
                                break;
+#ifdef I40E_FCOE
+                       case I40E_VSI_FCOE:
+                               qcount = num_tc_qps;
+                               break;
+#endif
                        case I40E_VSI_FDIR:
                        case I40E_VSI_SRIOV:
                        case I40E_VSI_VMDQ2:
@@ -1477,7 +1596,11 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
  * i40e_set_rx_mode - NDO callback to set the netdev filters
  * @netdev: network interface device structure
  **/
+#ifdef I40E_FCOE
+void i40e_set_rx_mode(struct net_device *netdev)
+#else
 static void i40e_set_rx_mode(struct net_device *netdev)
+#endif
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_mac_filter *f, *ftmp;
@@ -1700,9 +1823,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
                kfree(add_list);
                add_list = NULL;
 
-               if (add_happened && (!aq_ret)) {
-                       /* do nothing */;
-               } else if (add_happened && (aq_ret)) {
+               if (add_happened && aq_ret &&
+                   pf->hw.aq.asq_last_status != I40E_AQ_RC_EINVAL) {
                        dev_info(&pf->pdev->dev,
                                 "add filter failed, err %d, aq_err %d\n",
                                 aq_ret, pf->hw.aq.asq_last_status);
@@ -2055,8 +2177,13 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
  *
  * net_device_ops implementation for adding vlan ids
  **/
+#ifdef I40E_FCOE
+int i40e_vlan_rx_add_vid(struct net_device *netdev,
+                        __always_unused __be16 proto, u16 vid)
+#else
 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
                                __always_unused __be16 proto, u16 vid)
+#endif
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
@@ -2089,8 +2216,13 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev,
  *
  * net_device_ops implementation for removing vlan ids
  **/
+#ifdef I40E_FCOE
+int i40e_vlan_rx_kill_vid(struct net_device *netdev,
+                         __always_unused __be16 proto, u16 vid)
+#else
 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
                                 __always_unused __be16 proto, u16 vid)
+#endif
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
@@ -2222,6 +2354,9 @@ static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
 
        for (i = 0; i < vsi->num_queue_pairs && !err; i++)
                err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
+#ifdef I40E_FCOE
+       i40e_fcoe_setup_ddp_resources(vsi);
+#endif
        return err;
 }
 
@@ -2241,6 +2376,9 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
        for (i = 0; i < vsi->num_queue_pairs; i++)
                if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
                        i40e_free_rx_resources(vsi->rx_rings[i]);
+#ifdef I40E_FCOE
+       i40e_fcoe_free_ddp_resources(vsi);
+#endif
 }
 
 /**
@@ -2282,6 +2420,9 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
        tx_ctx.qlen = ring->count;
        tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
                                               I40E_FLAG_FD_ATR_ENABLED));
+#ifdef I40E_FCOE
+       tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
+#endif
        tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
        /* FDIR VSI tx ring can still use RS bit and writebacks */
        if (vsi->type != I40E_VSI_FDIR)
@@ -2387,10 +2528,6 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
 
        rx_ctx.rxmax = min_t(u16, vsi->max_frame,
                                  (chain_len * ring->rx_buf_len));
-       rx_ctx.tphrdesc_ena = 1;
-       rx_ctx.tphwdesc_ena = 1;
-       rx_ctx.tphdata_ena = 1;
-       rx_ctx.tphhead_ena = 1;
        if (hw->revision_id == 0)
                rx_ctx.lrxqthresh = 0;
        else
@@ -2398,6 +2535,9 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
        rx_ctx.crcstrip = 1;
        rx_ctx.l2tsel = 1;
        rx_ctx.showiv = 1;
+#ifdef I40E_FCOE
+       rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
+#endif
        /* set the prefena field to 1 because the manual says to */
        rx_ctx.prefena = 1;
 
@@ -2482,6 +2622,17 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
                break;
        }
 
+#ifdef I40E_FCOE
+       /* setup rx buffer for FCoE */
+       if ((vsi->type == I40E_VSI_FCOE) &&
+           (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
+               vsi->rx_hdr_len = 0;
+               vsi->rx_buf_len = I40E_RXBUFFER_3072;
+               vsi->max_frame = I40E_RXBUFFER_3072;
+               vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
+       }
+
+#endif /* I40E_FCOE */
        /* round up for the chip's needs */
        vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
                                (1 << I40E_RXQ_CTX_HBUFF_SHIFT));
@@ -2755,6 +2906,22 @@ void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
        /* skip the flush */
 }
 
+/**
+ * i40e_irq_dynamic_disable - Disable default interrupt generation settings
+ * @vsi: pointer to a vsi
+ * @vector: disable a particular Hw Interrupt vector
+ **/
+void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector)
+{
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       u32 val;
+
+       val = I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
+       wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
+       i40e_flush(hw);
+}
+
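
A minimal usage sketch for the new helper, pairing it with the existing i40e_irq_dynamic_enable(); the pairing below is hypothetical and only illustrates masking a vector around per-vector reprogramming:

	i40e_irq_dynamic_disable(vsi, vector);
	/* ... touch ITR or ring state owned by this vector ... */
	i40e_irq_dynamic_enable(vsi, vector);
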
 /**
  * i40e_msix_clean_rings - MSIX mode Interrupt Handler
  * @irq: interrupt number
@@ -3057,16 +3224,33 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
                /* clear next_to_watch to prevent false hangs */
                tx_buf->next_to_watch = NULL;
 
+               tx_desc->buffer_addr = 0;
+               tx_desc->cmd_type_offset_bsz = 0;
+               /* move past filter desc */
+               tx_buf++;
+               tx_desc++;
+               i++;
+               if (unlikely(!i)) {
+                       i -= tx_ring->count;
+                       tx_buf = tx_ring->tx_bi;
+                       tx_desc = I40E_TX_DESC(tx_ring, 0);
+               }
                /* unmap skb header data */
                dma_unmap_single(tx_ring->dev,
                                 dma_unmap_addr(tx_buf, dma),
                                 dma_unmap_len(tx_buf, len),
                                 DMA_TO_DEVICE);
+               if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
+                       kfree(tx_buf->raw_buf);
 
+               tx_buf->raw_buf = NULL;
+               tx_buf->tx_flags = 0;
+               tx_buf->next_to_watch = NULL;
                dma_unmap_len_set(tx_buf, len, 0);
+               tx_desc->buffer_addr = 0;
+               tx_desc->cmd_type_offset_bsz = 0;
 
-
-               /* move to the next desc and buffer to clean */
+               /* move us past the eop_desc for start of next FD desc */
                tx_buf++;
                tx_desc++;
                i++;
@@ -3151,8 +3335,12 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
 
        /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
         * group them so there are multiple queues per vector.
+        * It is also important to go through all the vectors available to be
+        * sure that, if we don't use all the vectors, the remaining vectors
+        * are cleared. This is especially important when decreasing the
+        * number of queues in use.
         */
-       for (; v_start < q_vectors && qp_remaining; v_start++) {
+       for (; v_start < q_vectors; v_start++) {
                struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
 
                num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
@@ -3205,7 +3393,11 @@ static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
  * This is used by netconsole to send skbs without having to re-enable
  * interrupts.  It's not called while the normal interrupt routine is executing.
  **/
+#ifdef I40E_FCOE
+void i40e_netpoll(struct net_device *netdev)
+#else
 static void i40e_netpoll(struct net_device *netdev)
+#endif
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
@@ -3227,6 +3419,35 @@ static void i40e_netpoll(struct net_device *netdev)
 }
 #endif
 
+/**
+ * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
+ * @pf: the PF being configured
+ * @pf_q: the PF queue
+ * @enable: enable or disable state of the queue
+ *
+ * This routine will wait for the given Tx queue of the PF to reach the
+ * enabled or disabled state.
+ * Returns -ETIMEDOUT in case of failing to reach the requested state after
+ * multiple retries; else will return 0 in case of success.
+ **/
+static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
+{
+       int i;
+       u32 tx_reg;
+
+       for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
+               tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
+               if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+                       break;
+
+               udelay(10);
+       }
+       if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
+               return -ETIMEDOUT;
+
+       return 0;
+}
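
The helper polls QENA_STAT up to I40E_QUEUE_WAIT_RETRY_LIMIT times with a 10 usec delay between reads. A minimal sketch of how it pairs with the queue-enable write, mirroring the i40e_vsi_control_tx change in the next hunk:

	wr32(hw, I40E_QTX_ENA(pf_q), tx_reg | I40E_QTX_ENA_QENA_REQ_MASK);
	ret = i40e_pf_txq_wait(pf, pf_q, true);
	if (ret == -ETIMEDOUT)
		dev_info(&pf->pdev->dev, "Tx ring %d enable timeout\n", pf_q);
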
+
 /**
  * i40e_vsi_control_tx - Start or stop a VSI's rings
  * @vsi: the VSI being configured
@@ -3236,7 +3457,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
 {
        struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
-       int i, j, pf_q;
+       int i, j, pf_q, ret = 0;
        u32 tx_reg;
 
        pf_q = vsi->base_queue;
@@ -3269,22 +3490,46 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
                wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
 
                /* wait for the change to finish */
-               for (j = 0; j < 10; j++) {
-                       tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
-                       if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
-                               break;
-
-                       udelay(10);
-               }
-               if (j >= 10) {
-                       dev_info(&pf->pdev->dev, "Tx ring %d %sable timeout\n",
-                                pf_q, (enable ? "en" : "dis"));
-                       return -ETIMEDOUT;
+               ret = i40e_pf_txq_wait(pf, pf_q, enable);
+               if (ret) {
+                       dev_info(&pf->pdev->dev,
+                                "%s: VSI seid %d Tx ring %d %sable timeout\n",
+                                __func__, vsi->seid, pf_q,
+                                (enable ? "en" : "dis"));
+                       break;
                }
        }
 
        if (hw->revision_id == 0)
                mdelay(50);
+       return ret;
+}
+
+/**
+ * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
+ * @pf: the PF being configured
+ * @pf_q: the PF queue
+ * @enable: enable or disable state of the queue
+ *
+ * This routine will wait for the given Rx queue of the PF to reach the
+ * enabled or disabled state.
+ * Returns -ETIMEDOUT if the requested state is not reached after multiple
+ * retries, otherwise 0 on success.
+ **/
+static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
+{
+       int i;
+       u32 rx_reg;
+
+       for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
+               rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
+               if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+                       break;
+
+               udelay(10);
+       }
+       if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
+               return -ETIMEDOUT;
 
        return 0;
 }
@@ -3298,7 +3543,7 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
 {
        struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
-       int i, j, pf_q;
+       int i, j, pf_q, ret = 0;
        u32 rx_reg;
 
        pf_q = vsi->base_queue;
@@ -3323,22 +3568,17 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
                wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
 
                /* wait for the change to finish */
-               for (j = 0; j < 10; j++) {
-                       rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
-
-                       if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
-                               break;
-
-                       udelay(10);
-               }
-               if (j >= 10) {
-                       dev_info(&pf->pdev->dev, "Rx ring %d %sable timeout\n",
-                                pf_q, (enable ? "en" : "dis"));
-                       return -ETIMEDOUT;
+               ret = i40e_pf_rxq_wait(pf, pf_q, enable);
+               if (ret) {
+                       dev_info(&pf->pdev->dev,
+                                "%s: VSI seid %d Rx ring %d %sable timeout\n",
+                                __func__, vsi->seid, pf_q,
+                                (enable ? "en" : "dis"));
+                       break;
                }
        }
 
-       return 0;
+       return ret;
 }
 
 /**
@@ -4107,12 +4347,20 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
                        continue;
 
                /* - Enable all TCs for the LAN VSI
+#ifdef I40E_FCOE
+                * - For FCoE VSI only enable the TC configured
+                *   as per the APP TLV
+#endif
                 * - For all others keep them at TC0 for now
                 */
                if (v == pf->lan_vsi)
                        tc_map = i40e_pf_get_tc_map(pf);
                else
                        tc_map = i40e_pf_get_default_tc(pf);
+#ifdef I40E_FCOE
+               if (pf->vsi[v]->type == I40E_VSI_FCOE)
+                       tc_map = i40e_get_fcoe_tc_map(pf);
+#endif /* #ifdef I40E_FCOE */
 
                ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
                if (ret) {
@@ -4193,13 +4441,13 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
 
        switch (vsi->back->hw.phy.link_info.link_speed) {
        case I40E_LINK_SPEED_40GB:
-               strncpy(speed, "40 Gbps", SPEED_SIZE);
+               strlcpy(speed, "40 Gbps", SPEED_SIZE);
                break;
        case I40E_LINK_SPEED_10GB:
-               strncpy(speed, "10 Gbps", SPEED_SIZE);
+               strlcpy(speed, "10 Gbps", SPEED_SIZE);
                break;
        case I40E_LINK_SPEED_1GB:
-               strncpy(speed, "1000 Mbps", SPEED_SIZE);
+               strlcpy(speed, "1000 Mbps", SPEED_SIZE);
                break;
        default:
                break;
@@ -4207,16 +4455,16 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
 
        switch (vsi->back->hw.fc.current_mode) {
        case I40E_FC_FULL:
-               strncpy(fc, "RX/TX", FC_SIZE);
+               strlcpy(fc, "RX/TX", FC_SIZE);
                break;
        case I40E_FC_TX_PAUSE:
-               strncpy(fc, "TX", FC_SIZE);
+               strlcpy(fc, "TX", FC_SIZE);
                break;
        case I40E_FC_RX_PAUSE:
-               strncpy(fc, "RX", FC_SIZE);
+               strlcpy(fc, "RX", FC_SIZE);
                break;
        default:
-               strncpy(fc, "None", FC_SIZE);
+               strlcpy(fc, "None", FC_SIZE);
                break;
        }
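
The strncpy to strlcpy conversions above (and in i40e_send_version further down) matter because strncpy does not NUL-terminate when the source fills the destination. A tiny sketch of the difference, with strlcpy standing in for the kernel helper of the same name and a deliberately undersized buffer:

	char buf[4];

	strncpy(buf, "40 Gbps", sizeof(buf));	/* buf holds '4','0',' ','G' - no terminator */
	strlcpy(buf, "40 Gbps", sizeof(buf));	/* buf holds "40 " - truncated but terminated */
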
 
@@ -4231,8 +4479,12 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
 static int i40e_up_complete(struct i40e_vsi *vsi)
 {
        struct i40e_pf *pf = vsi->back;
+       u8 set_fc_aq_fail = 0;
        int err;
 
+       /* force flow control off */
+       i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
+
        if (pf->flags & I40E_FLAG_MSIX_ENABLED)
                i40e_vsi_configure_msix(vsi);
        else
@@ -4254,11 +4506,26 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
                netif_carrier_on(vsi->netdev);
        } else if (vsi->netdev) {
                i40e_print_link_message(vsi, false);
+               /* need to check for qualified module here */
+               if ((pf->hw.phy.link_info.link_info &
+                       I40E_AQ_MEDIA_AVAILABLE) &&
+                   (!(pf->hw.phy.link_info.an_info &
+                       I40E_AQ_QUALIFIED_MODULE)))
+                       netdev_err(vsi->netdev,
+                                  "the driver failed to link because an unqualified module was detected.");
        }
 
        /* replay FDIR SB filters */
-       if (vsi->type == I40E_VSI_FDIR)
+       if (vsi->type == I40E_VSI_FDIR) {
+               /* reset fd counters */
+               pf->fd_add_err = pf->fd_atr_cnt = 0;
+               if (pf->fd_tcp_rule > 0) {
+                       pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+                       dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
+                       pf->fd_tcp_rule = 0;
+               }
                i40e_fdir_filter_restore(vsi);
+       }
        i40e_service_event_schedule(pf);
 
        return 0;
@@ -4335,7 +4602,11 @@ void i40e_down(struct i40e_vsi *vsi)
  * @netdev: net device to configure
  * @tc: number of traffic classes to enable
  **/
+#ifdef I40E_FCOE
+int i40e_setup_tc(struct net_device *netdev, u8 tc)
+#else
 static int i40e_setup_tc(struct net_device *netdev, u8 tc)
+#endif
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
@@ -4400,7 +4671,11 @@ exit:
  *
  * Returns 0 on success, negative value on failure
  **/
+#ifdef I40E_FCOE
+int i40e_open(struct net_device *netdev)
+#else
 static int i40e_open(struct net_device *netdev)
+#endif
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
@@ -4536,7 +4811,11 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf)
  *
  * Returns 0, this is not allowed to fail
  **/
+#ifdef I40E_FCOE
+int i40e_close(struct net_device *netdev)
+#else
 static int i40e_close(struct net_device *netdev)
+#endif
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
@@ -4638,6 +4917,23 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
                        }
                }
 
+               /* no further action needed, so return now */
+               return;
+       } else if (reset_flags & (1 << __I40E_DOWN_REQUESTED)) {
+               int v;
+
+               /* Find the VSI(s) that needs to be brought down */
+               dev_info(&pf->pdev->dev, "VSI down requested\n");
+               for (v = 0; v < pf->num_alloc_vsi; v++) {
+                       struct i40e_vsi *vsi = pf->vsi[v];
+                       if (vsi != NULL &&
+                           test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
+                               set_bit(__I40E_DOWN, &vsi->state);
+                               i40e_down(vsi);
+                               clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
+                       }
+               }
+
                /* no further action needed, so return now */
                return;
        } else {
@@ -4845,7 +5141,20 @@ static void i40e_service_event_complete(struct i40e_pf *pf)
 }
 
 /**
- * i40e_get_current_fd_count - Get the count of FD filters programmed in the HW
+ * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filter count
+ * @pf: board private structure
+ **/
+int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
+{
+       int val, fcnt_prog;
+
+       val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
+       fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
+       return fcnt_prog;
+}
+
+/**
+ * i40e_get_current_fd_count - Get the count of total FD filters programmed
  * @pf: board private structure
  **/
 int i40e_get_current_fd_count(struct i40e_pf *pf)
@@ -4866,15 +5175,17 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
 {
        u32 fcnt_prog, fcnt_avail;
 
+       if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
+               return;
+
        /* Check if, FD SB or ATR was auto disabled and if there is enough room
         * to re-enable
         */
-       if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
-           (pf->flags & I40E_FLAG_FD_SB_ENABLED))
-               return;
-       fcnt_prog = i40e_get_current_fd_count(pf);
-       fcnt_avail = i40e_get_fd_cnt_all(pf);
-       if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) {
+       fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf);
+       fcnt_avail = pf->fdir_pf_filter_count;
+       if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
+           (pf->fd_add_err == 0) ||
+           (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
                if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
                    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
                        pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
@@ -4891,23 +5202,84 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
        }
 }
 
+#define I40E_MIN_FD_FLUSH_INTERVAL 10
+/**
+ * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
+ * @pf: board private structure
+ **/
+static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
+{
+       int flush_wait_retry = 50;
+       int reg;
+
+       if (time_after(jiffies, pf->fd_flush_timestamp +
+                               (I40E_MIN_FD_FLUSH_INTERVAL * HZ))) {
+               set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
+               pf->fd_flush_timestamp = jiffies;
+               pf->auto_disable_flags |= I40E_FLAG_FD_SB_ENABLED;
+               pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+               /* flush all filters */
+               wr32(&pf->hw, I40E_PFQF_CTL_1,
+                    I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
+               i40e_flush(&pf->hw);
+               pf->fd_flush_cnt++;
+               pf->fd_add_err = 0;
+               do {
+                       /* Check FD flush status every 5-6msec */
+                       usleep_range(5000, 6000);
+                       reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
+                       if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
+                               break;
+               } while (flush_wait_retry--);
+               if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
+                       dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
+               } else {
+                       /* replay sideband filters */
+                       i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
+
+                       pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+                       pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+                       pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
+                       clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
+                       dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
+               }
+       }
+}
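
Worked out from the constants in this hunk, no new values assumed:

	max poll time per flush ~= flush_wait_retry * usleep_range(5000, 6000)
	                        ~= 50 * 5-6 ms = 250-300 ms
	minimum spacing between flushes = I40E_MIN_FD_FLUSH_INTERVAL = 10 seconds
	(enforced by the time_after() check on fd_flush_timestamp)
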
+
+/**
+ * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
+ * @pf: board private structure
+ **/
+int i40e_get_current_atr_cnt(struct i40e_pf *pf)
+{
+       return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
+}
+
+/* We can see up to 256 filter programming descriptors in transit if the
+ * filters are being applied really fast, before we see the first filter miss
+ * error on Rx queue 0. Accumulating enough error messages before reacting
+ * will make sure we don't trigger a flush too often.
+ */
+#define I40E_MAX_FD_PROGRAM_ERROR 256
+
 /**
  * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
  * @pf: board private structure
  **/
 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
 {
-       if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT))
-               return;
 
        /* if interface is down do nothing */
        if (test_bit(__I40E_DOWN, &pf->state))
                return;
+
+       if ((pf->fd_add_err >= I40E_MAX_FD_PROGRAM_ERROR) &&
+           (i40e_get_current_atr_cnt(pf) >= pf->fd_atr_cnt) &&
+           (i40e_get_current_atr_cnt(pf) > pf->fdir_pf_filter_count))
+               i40e_fdir_flush_and_replay(pf);
+
        i40e_fdir_check_and_reenable(pf);
 
-       if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
-           (pf->flags & I40E_FLAG_FD_SB_ENABLED))
-               pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;
 }
 
 /**
@@ -4917,11 +5289,14 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
  **/
 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
 {
-       if (!vsi)
+       if (!vsi || test_bit(__I40E_DOWN, &vsi->state))
                return;
 
        switch (vsi->type) {
        case I40E_VSI_MAIN:
+#ifdef I40E_FCOE
+       case I40E_VSI_FCOE:
+#endif
                if (!vsi->netdev || !vsi->netdev_registered)
                        break;
 
@@ -5110,6 +5485,10 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
                reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED);
                clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
        }
+       if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
+               reset_flags |= (1 << __I40E_DOWN_REQUESTED);
+               clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
+       }
 
        /* If there's a recovery already waiting, it takes
         * precedence before starting a new reset sequence.
@@ -5146,6 +5525,13 @@ static void i40e_handle_link_event(struct i40e_pf *pf,
        memcpy(&pf->hw.phy.link_info_old, hw_link_info,
               sizeof(pf->hw.phy.link_info_old));
 
+       /* check for unqualified module, if link is down */
+       if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
+           (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
+           (!(status->link_info & I40E_AQ_LINK_UP)))
+               dev_err(&pf->pdev->dev,
+                       "The driver failed to link because an unqualified module was detected.\n");
+
        /* update link status */
        hw_link_info->phy_type = (enum i40e_aq_phy_type)status->phy_type;
        hw_link_info->link_speed = (enum i40e_aq_link_speed)status->link_speed;
@@ -5164,7 +5550,7 @@ static void i40e_handle_link_event(struct i40e_pf *pf,
         * then see if the status changed while processing the
         * initial event.
         */
-       i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
+       i40e_update_link_info(&pf->hw, true);
        i40e_link_event(pf);
 }
 
@@ -5182,7 +5568,8 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
        u32 oldval;
        u32 val;
 
-       if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state))
+       /* Do not run clean AQ when PF reset fails */
+       if (test_bit(__I40E_RESET_FAILED, &pf->state))
                return;
 
        /* check for error indications */
@@ -5228,10 +5615,9 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
        do {
                event.msg_size = I40E_MAX_AQ_BUF_SIZE; /* reinit each time */
                ret = i40e_clean_arq_element(hw, &event, &pending);
-               if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
-                       dev_info(&pf->pdev->dev, "No ARQ event found\n");
+               if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
                        break;
-               } else if (ret) {
+               else if (ret) {
                        dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
                        break;
                }
@@ -5463,6 +5849,20 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
        struct i40e_vsi *vsi;
        int i;
 
+       /* quick workaround for an NVM issue that leaves a critical register
+        * uninitialized
+        */
+       if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
+               static const u32 hkey[] = {
+                       0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
+                       0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
+                       0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
+                       0x95b3a76d};
+
+               for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
+                       wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
+       }
+
        if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
                return;
 
@@ -5512,7 +5912,7 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
  *
  * Close up the VFs and other things in prep for pf Reset.
   **/
-static int i40e_prep_for_reset(struct i40e_pf *pf)
+static void i40e_prep_for_reset(struct i40e_pf *pf)
 {
        struct i40e_hw *hw = &pf->hw;
        i40e_status ret = 0;
@@ -5520,7 +5920,7 @@ static int i40e_prep_for_reset(struct i40e_pf *pf)
 
        clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
        if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
-               return 0;
+               return;
 
        dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
 
@@ -5537,13 +5937,10 @@ static int i40e_prep_for_reset(struct i40e_pf *pf)
        /* call shutdown HMC */
        if (hw->hmc.hmc_obj) {
                ret = i40e_shutdown_lan_hmc(hw);
-               if (ret) {
+               if (ret)
                        dev_warn(&pf->pdev->dev,
                                 "shutdown_lan_hmc failed: %d\n", ret);
-                       clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
-               }
        }
-       return ret;
 }
 
 /**
@@ -5558,7 +5955,7 @@ static void i40e_send_version(struct i40e_pf *pf)
        dv.minor_version = DRV_VERSION_MINOR;
        dv.build_version = DRV_VERSION_BUILD;
        dv.subbuild_version = 0;
-       strncpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
+       strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
        i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
 }
 
@@ -5580,19 +5977,20 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
        ret = i40e_pf_reset(hw);
        if (ret) {
                dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
-               goto end_core_reset;
+               set_bit(__I40E_RESET_FAILED, &pf->state);
+               goto clear_recovery;
        }
        pf->pfr_count++;
 
        if (test_bit(__I40E_DOWN, &pf->state))
-               goto end_core_reset;
+               goto clear_recovery;
        dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
 
        /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
        ret = i40e_init_adminq(&pf->hw);
        if (ret) {
                dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
-               goto end_core_reset;
+               goto clear_recovery;
        }
 
        /* re-verify the eeprom if we just had an EMP reset */
@@ -5629,7 +6027,12 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
                goto end_core_reset;
        }
 #endif /* CONFIG_I40E_DCB */
+#ifdef I40E_FCOE
+       ret = i40e_init_pf_fcoe(pf);
+       if (ret)
+               dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", ret);
 
+#endif
        /* do basic switch setup */
        ret = i40e_setup_pf_switch(pf, reinit);
        if (ret)
@@ -5679,7 +6082,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
        }
 
        if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
-               dev_info(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
+               dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
                /* no VEB, so rebuild only the Main VSI */
                ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
                if (ret) {
@@ -5705,6 +6108,8 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
        i40e_send_version(pf);
 
 end_core_reset:
+       clear_bit(__I40E_RESET_FAILED, &pf->state);
+clear_recovery:
        clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
 }
 
@@ -5717,11 +6122,8 @@ end_core_reset:
  **/
 static void i40e_handle_reset_warning(struct i40e_pf *pf)
 {
-       i40e_status ret;
-
-       ret = i40e_prep_for_reset(pf);
-       if (!ret)
-               i40e_reset_and_rebuild(pf, false);
+       i40e_prep_for_reset(pf);
+       i40e_reset_and_rebuild(pf, false);
 }
 
 /**
@@ -5734,6 +6136,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
 {
        struct i40e_hw *hw = &pf->hw;
        bool mdd_detected = false;
+       bool pf_mdd_detected = false;
        struct i40e_vf *vf;
        u32 reg;
        int i;
@@ -5744,33 +6147,55 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
        /* find what triggered the MDD event */
        reg = rd32(hw, I40E_GL_MDET_TX);
        if (reg & I40E_GL_MDET_TX_VALID_MASK) {
-               u8 func = (reg & I40E_GL_MDET_TX_FUNCTION_MASK)
-                               >> I40E_GL_MDET_TX_FUNCTION_SHIFT;
-               u8 event = (reg & I40E_GL_MDET_TX_EVENT_SHIFT)
-                               >> I40E_GL_MDET_TX_EVENT_SHIFT;
-               u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK)
-                               >> I40E_GL_MDET_TX_QUEUE_SHIFT;
-               dev_info(&pf->pdev->dev,
-                        "Malicious Driver Detection event 0x%02x on TX queue %d of function 0x%02x\n",
-                        event, queue, func);
+               u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
+                               I40E_GL_MDET_TX_PF_NUM_SHIFT;
+               u8 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
+                               I40E_GL_MDET_TX_VF_NUM_SHIFT;
+               u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
+                               I40E_GL_MDET_TX_EVENT_SHIFT;
+               u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
+                               I40E_GL_MDET_TX_QUEUE_SHIFT;
+               if (netif_msg_tx_err(pf))
+                       dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d pf number 0x%02x vf number 0x%02x\n",
+                                event, queue, pf_num, vf_num);
                wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
                mdd_detected = true;
        }
        reg = rd32(hw, I40E_GL_MDET_RX);
        if (reg & I40E_GL_MDET_RX_VALID_MASK) {
-               u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK)
-                               >> I40E_GL_MDET_RX_FUNCTION_SHIFT;
-               u8 event = (reg & I40E_GL_MDET_RX_EVENT_SHIFT)
-                               >> I40E_GL_MDET_RX_EVENT_SHIFT;
-               u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK)
-                               >> I40E_GL_MDET_RX_QUEUE_SHIFT;
-               dev_info(&pf->pdev->dev,
-                        "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
-                        event, queue, func);
+               u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
+                               I40E_GL_MDET_RX_FUNCTION_SHIFT;
+               u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
+                               I40E_GL_MDET_RX_EVENT_SHIFT;
+               u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
+                               I40E_GL_MDET_RX_QUEUE_SHIFT;
+               if (netif_msg_rx_err(pf))
+                       dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
+                                event, queue, func);
                wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
                mdd_detected = true;
        }
 
+       if (mdd_detected) {
+               reg = rd32(hw, I40E_PF_MDET_TX);
+               if (reg & I40E_PF_MDET_TX_VALID_MASK) {
+                       wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
+                       dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
+                       pf_mdd_detected = true;
+               }
+               reg = rd32(hw, I40E_PF_MDET_RX);
+               if (reg & I40E_PF_MDET_RX_VALID_MASK) {
+                       wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
+                       dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
+                       pf_mdd_detected = true;
+               }
+               /* Queue belongs to the PF, initiate a reset */
+               if (pf_mdd_detected) {
+                       set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+                       i40e_service_event_schedule(pf);
+               }
+       }
+
        /* see if one of the VFs needs its hand slapped */
        for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
                vf = &(pf->vf[i]);
@@ -5778,14 +6203,16 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
                if (reg & I40E_VP_MDET_TX_VALID_MASK) {
                        wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
                        vf->num_mdd_events++;
-                       dev_info(&pf->pdev->dev, "MDD TX event on VF %d\n", i);
+                       dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
+                                i);
                }
 
                reg = rd32(hw, I40E_VP_MDET_RX(i));
                if (reg & I40E_VP_MDET_RX_VALID_MASK) {
                        wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
                        vf->num_mdd_events++;
-                       dev_info(&pf->pdev->dev, "MDD RX event on VF %d\n", i);
+                       dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
+                                i);
                }
 
                if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
@@ -5860,6 +6287,12 @@ static void i40e_service_task(struct work_struct *work)
                                          service_task);
        unsigned long start_time = jiffies;
 
+       /* don't bother with service tasks if a reset is in progress */
+       if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
+               i40e_service_event_complete(pf);
+               return;
+       }
+
        i40e_reset_subtask(pf);
        i40e_handle_mdd_event(pf);
        i40e_vc_process_vflr_event(pf);
@@ -5938,6 +6371,15 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
                                      I40E_REQ_DESCRIPTOR_MULTIPLE);
                break;
 
+#ifdef I40E_FCOE
+       case I40E_VSI_FCOE:
+               vsi->alloc_queue_pairs = pf->num_fcoe_qps;
+               vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
+                                     I40E_REQ_DESCRIPTOR_MULTIPLE);
+               vsi->num_q_vectors = pf->num_fcoe_msix;
+               break;
+
+#endif /* I40E_FCOE */
        default:
                WARN_ON(1);
                return -ENODATA;
@@ -5968,7 +6410,7 @@ static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
 
        if (alloc_qvectors) {
                /* allocate memory for q_vector pointers */
-               size = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors;
+               size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
                vsi->q_vectors = kzalloc(size, GFP_KERNEL);
                if (!vsi->q_vectors) {
                        ret = -ENOMEM;
@@ -6249,6 +6691,9 @@ static int i40e_init_msix(struct i40e_pf *pf)
         *              is governed by number of cpus in the system.
         *      - assumes symmetric Tx/Rx pairing
         *   - The number of VMDq pairs
+#ifdef I40E_FCOE
+        *   - The number of FCOE qps.
+#endif
         * Once we count this up, try the request.
         *
         * If we can't get what we want, we'll simplify to nearly nothing
@@ -6261,6 +6706,13 @@ static int i40e_init_msix(struct i40e_pf *pf)
        if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
                v_budget++;
 
+#ifdef I40E_FCOE
+       if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
+               pf->num_fcoe_msix = pf->num_fcoe_qps;
+               v_budget += pf->num_fcoe_msix;
+       }
+
+#endif
        /* Scale down if necessary, and the rings will share vectors */
        v_budget = min_t(int, v_budget, hw->func_caps.num_msix_vectors);
 
@@ -6279,6 +6731,10 @@ static int i40e_init_msix(struct i40e_pf *pf)
                 * of these features based on the policy and at the end disable
                 * the features that did not get any vectors.
                 */
+#ifdef I40E_FCOE
+               pf->num_fcoe_qps = 0;
+               pf->num_fcoe_msix = 0;
+#endif
                pf->num_vmdq_msix = 0;
        }
 
@@ -6309,9 +6765,24 @@ static int i40e_init_msix(struct i40e_pf *pf)
                        pf->num_lan_msix = 1;
                        break;
                case 3:
+#ifdef I40E_FCOE
+                       /* give one vector to FCoE */
+                       if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
+                               pf->num_lan_msix = 1;
+                               pf->num_fcoe_msix = 1;
+                       }
+#else
                        pf->num_lan_msix = 2;
+#endif
                        break;
                default:
+#ifdef I40E_FCOE
+                       /* give one vector to FCoE */
+                       if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
+                               pf->num_fcoe_msix = 1;
+                               vec--;
+                       }
+#endif
                        pf->num_lan_msix = min_t(int, (vec / 2),
                                                 pf->num_lan_qps);
                        pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix),
@@ -6325,6 +6796,13 @@ static int i40e_init_msix(struct i40e_pf *pf)
                dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
                pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
        }
+#ifdef I40E_FCOE
+
+       if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
+               dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n");
+               pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
+       }
+#endif
        return err;
 }
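
A worked example of the default case above, assuming vec is the number of MSI-X vectors still unassigned when the switch runs: with vec = 6 and FCoE enabled, FCoE claims one vector (vec becomes 5), the LAN VSI gets min(vec / 2, num_lan_qps) = min(2, num_lan_qps) vectors, and the remaining vec - num_lan_msix = 3 are offered to VMDq, subject to the second min_t() limit that falls outside this hunk.
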
 
@@ -6408,6 +6886,9 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
                err = i40e_init_msix(pf);
                if (err) {
                        pf->flags &= ~(I40E_FLAG_MSIX_ENABLED   |
+#ifdef I40E_FCOE
+                                      I40E_FLAG_FCOE_ENABLED   |
+#endif
                                       I40E_FLAG_RSS_ENABLED    |
                                       I40E_FLAG_DCB_CAPABLE    |
                                       I40E_FLAG_SRIOV_ENABLED  |
@@ -6492,6 +6973,7 @@ static int i40e_config_rss(struct i40e_pf *pf)
        u32 lut = 0;
        int i, j;
        u64 hena;
+       u32 reg_val;
 
        /* Fill out hash function seed */
        for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
@@ -6504,8 +6986,19 @@ static int i40e_config_rss(struct i40e_pf *pf)
        wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
        wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
 
+       /* Check capability and set table size and register per hw expectation */
+       reg_val = rd32(hw, I40E_PFQF_CTL_0);
+       if (hw->func_caps.rss_table_size == 512) {
+               reg_val |= I40E_PFQF_CTL_0_HASHLUTSIZE_512;
+               pf->rss_table_size = 512;
+       } else {
+               pf->rss_table_size = 128;
+               reg_val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_512;
+       }
+       wr32(hw, I40E_PFQF_CTL_0, reg_val);
+
        /* Populate the LUT with max no. of queues in round robin fashion */
-       for (i = 0, j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
+       for (i = 0, j = 0; i < pf->rss_table_size; i++, j++) {
 
                /* The assumption is that lan qp count will be the highest
                 * qp count for any PF VSI that needs RSS.
@@ -6592,13 +7085,12 @@ static int i40e_sw_init(struct i40e_pf *pf)
         * maximum might end up larger than the available queues
         */
        pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
+       pf->rss_size = 1;
        pf->rss_size_max = min_t(int, pf->rss_size_max,
                                 pf->hw.func_caps.num_tx_qp);
        if (pf->hw.func_caps.rss) {
                pf->flags |= I40E_FLAG_RSS_ENABLED;
                pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
-       } else {
-               pf->rss_size = 1;
        }
 
        /* MFP mode enabled */
@@ -6634,6 +7126,12 @@ static int i40e_sw_init(struct i40e_pf *pf)
                pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
        }
 
+#ifdef I40E_FCOE
+       err = i40e_init_pf_fcoe(pf);
+       if (err)
+               dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", err);
+
+#endif /* I40E_FCOE */
 #ifdef CONFIG_PCI_IOV
        if (pf->hw.func_caps.num_vfs) {
                pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
@@ -6670,6 +7168,8 @@ static int i40e_sw_init(struct i40e_pf *pf)
        pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors;
        pf->irq_pile->search_hint = 0;
 
+       pf->tx_timeout_recovery_level = 1;
+
        mutex_init(&pf->switch_mutex);
 
 sw_init_done:
@@ -6702,9 +7202,16 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
                        i40e_fdir_filter_exit(pf);
                }
                pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
-               /* if ATR was disabled it can be re-enabled. */
-               if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
-                       pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+               pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
+               /* reset fd counters */
+               pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
+               pf->fdir_pf_active_filters = 0;
+               pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+               dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
+               /* if ATR was auto disabled it can be re-enabled. */
+               if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+                   (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+                       pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
        }
        return need_reset;
 }
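
The rewritten branch above keeps two masks per feature: pf->flags records what the user asked for, while pf->auto_disable_flags records what the driver backed off on its own; ATR is only truly active when the first is set and the second is clear. A small stand-alone sketch of that two-mask pattern, using made-up flag values rather than the driver's real bit definitions:

/* Two-mask feature gating: a feature is active only if the user flag is
 * set and the driver has not auto-disabled it.  Flag values illustrative.
 */
#include <stdio.h>

#define FLAG_FD_ATR  0x1u
#define FLAG_FD_SB   0x2u

static int feature_active(unsigned int flags, unsigned int auto_disable,
                          unsigned int feature)
{
        return (flags & feature) && !(auto_disable & feature);
}

int main(void)
{
        unsigned int flags = FLAG_FD_ATR | FLAG_FD_SB;
        unsigned int auto_disable = FLAG_FD_ATR;   /* driver backed ATR off */

        printf("ATR active: %d\n",
               feature_active(flags, auto_disable, FLAG_FD_ATR));
        auto_disable &= ~FLAG_FD_ATR;              /* re-enable, as in the hunk */
        printf("ATR active: %d\n",
               feature_active(flags, auto_disable, FLAG_FD_ATR));
        return 0;
}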
@@ -6833,6 +7340,22 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
 }
 
 #endif
+static int i40e_get_phys_port_id(struct net_device *netdev,
+                                struct netdev_phys_port_id *ppid)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+
+       if (!(pf->flags & I40E_FLAG_PORT_ID_VALID))
+               return -EOPNOTSUPP;
+
+       ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
+       memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
+
+       return 0;
+}
+
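
The new i40e_get_phys_port_id() copies the port MAC address into the opaque netdev_phys_port_id buffer, clamping the length to the smaller of the two buffers. A compact sketch of that bounded-copy pattern outside the kernel; the struct layout and sizes here are illustrative, not the kernel's definitions:

/* Bounded copy into a fixed-size ID buffer, mirroring the min_t()/memcpy()
 * pattern above.  Sizes and the example address are illustrative.
 */
#include <stdio.h>
#include <string.h>

#define MAX_ID_LEN 32
#define ETH_ALEN   6

struct phys_port_id {
        unsigned char id[MAX_ID_LEN];
        unsigned char id_len;
};

int main(void)
{
        unsigned char port_addr[ETH_ALEN] = { 0x68, 0x05, 0xca, 0x00, 0x11, 0x22 };
        struct phys_port_id ppid;

        ppid.id_len = sizeof(port_addr) < sizeof(ppid.id) ?
                      sizeof(port_addr) : sizeof(ppid.id);
        memcpy(ppid.id, port_addr, ppid.id_len);

        printf("id_len=%u first_byte=0x%02x\n",
               (unsigned int)ppid.id_len, ppid.id[0]);
        return 0;
}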
 #ifdef HAVE_FDB_OPS
 #ifdef USE_CONST_DEV_UC_CHAR
 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
@@ -6910,13 +7433,14 @@ static int i40e_ndo_fdb_del(struct ndmsg *ndm,
 static int i40e_ndo_fdb_dump(struct sk_buff *skb,
                             struct netlink_callback *cb,
                             struct net_device *dev,
+                            struct net_device *filter_dev,
                             int idx)
 {
        struct i40e_netdev_priv *np = netdev_priv(dev);
        struct i40e_pf *pf = np->vsi->back;
 
        if (pf->flags & I40E_FLAG_SRIOV_ENABLED)
-               idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
+               idx = ndo_dflt_fdb_dump(skb, cb, dev, filter_dev, idx);
 
        return idx;
 }
@@ -6940,17 +7464,22 @@ static const struct net_device_ops i40e_netdev_ops = {
        .ndo_poll_controller    = i40e_netpoll,
 #endif
        .ndo_setup_tc           = i40e_setup_tc,
+#ifdef I40E_FCOE
+       .ndo_fcoe_enable        = i40e_fcoe_enable,
+       .ndo_fcoe_disable       = i40e_fcoe_disable,
+#endif
        .ndo_set_features       = i40e_set_features,
        .ndo_set_vf_mac         = i40e_ndo_set_vf_mac,
        .ndo_set_vf_vlan        = i40e_ndo_set_vf_port_vlan,
        .ndo_set_vf_rate        = i40e_ndo_set_vf_bw,
        .ndo_get_vf_config      = i40e_ndo_get_vf_config,
        .ndo_set_vf_link_state  = i40e_ndo_set_vf_link_state,
-       .ndo_set_vf_spoofchk    = i40e_ndo_set_vf_spoofck,
+       .ndo_set_vf_spoofchk    = i40e_ndo_set_vf_spoofchk,
 #ifdef CONFIG_I40E_VXLAN
        .ndo_add_vxlan_port     = i40e_add_vxlan_port,
        .ndo_del_vxlan_port     = i40e_del_vxlan_port,
 #endif
+       .ndo_get_phys_port_id   = i40e_get_phys_port_id,
 #ifdef HAVE_FDB_OPS
        .ndo_fdb_add            = i40e_ndo_fdb_add,
 #ifndef USE_DEFAULT_FDB_DEL_DUMP
@@ -7014,14 +7543,14 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
        if (vsi->type == I40E_VSI_MAIN) {
                SET_NETDEV_DEV(netdev, &pf->pdev->dev);
                ether_addr_copy(mac_addr, hw->mac.perm_addr);
-               /* The following two steps are necessary to prevent reception
-                * of tagged packets - by default the NVM loads a MAC-VLAN
-                * filter that will accept any tagged packet.  This is to
-                * prevent that during normal operations until a specific
-                * VLAN tag filter has been set.
+               /* The following steps are necessary to prevent reception
+                * of tagged packets - some older NVM configurations load a
+                * default MAC-VLAN filter that accepts any tagged packet,
+                * which must be replaced by a normal filter.
                 */
-               i40e_rm_default_mac_filter(vsi, mac_addr);
-               i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true);
+               if (!i40e_rm_default_mac_filter(vsi, mac_addr))
+                       i40e_add_filter(vsi, mac_addr,
+                                       I40E_VLAN_ANY, false, true);
        } else {
                /* relate the VSI_VMDQ name to the VSI_MAIN name */
                snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
@@ -7047,6 +7576,9 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
        netdev->netdev_ops = &i40e_netdev_ops;
        netdev->watchdog_timeo = 5 * HZ;
        i40e_set_ethtool_ops(netdev);
+#ifdef I40E_FCOE
+       i40e_fcoe_config_netdev(netdev, vsi);
+#endif
 
        return 0;
 }
@@ -7166,7 +7698,6 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                 * should be set to zero by default.
                 */
                ctxt.info.switch_id = 0;
-               ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
                ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
 
                /* Setup the VSI tx/rx queue map for TC0 only for now */
@@ -7200,6 +7731,16 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
                break;
 
+#ifdef I40E_FCOE
+       case I40E_VSI_FCOE:
+               ret = i40e_fcoe_vsi_init(vsi, &ctxt);
+               if (ret) {
+                       dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n");
+                       return ret;
+               }
+               break;
+
+#endif /* I40E_FCOE */
        default:
                return -ENODEV;
        }
@@ -7223,6 +7764,27 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
        list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
                f->changed = true;
                f_count++;
+
+               if (f->is_laa && vsi->type == I40E_VSI_MAIN) {
+                       struct i40e_aqc_remove_macvlan_element_data element;
+
+                       memset(&element, 0, sizeof(element));
+                       ether_addr_copy(element.mac_addr, f->macaddr);
+                       element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+                       ret = i40e_aq_remove_macvlan(hw, vsi->seid,
+                                                    &element, 1, NULL);
+                       if (ret) {
+                               /* some older FW has a different default */
+                               element.flags |=
+                                              I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+                               i40e_aq_remove_macvlan(hw, vsi->seid,
+                                                      &element, 1, NULL);
+                       }
+
+                       i40e_aq_mac_address_write(hw,
+                                                 I40E_AQC_WRITE_TYPE_LAA_WOL,
+                                                 f->macaddr, NULL);
+               }
        }
        if (f_count) {
                vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
@@ -7552,6 +8114,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
        /* setup the netdev if needed */
        case I40E_VSI_MAIN:
        case I40E_VSI_VMDQ2:
+       case I40E_VSI_FCOE:
                ret = i40e_config_netdev(vsi);
                if (ret)
                        goto err_netdev;
@@ -8090,7 +8653,6 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
  **/
 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
 {
-       u32 rxfc = 0, txfc = 0, rxfc_reg;
        int ret;
 
        /* find out what's out there already */
@@ -8150,68 +8712,13 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
                i40e_config_rss(pf);
 
        /* fill in link information and enable LSE reporting */
-       i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
+       i40e_update_link_info(&pf->hw, true);
        i40e_link_event(pf);
 
        /* Initialize user-specific link properties */
        pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
                                  I40E_AQ_AN_COMPLETED) ? true : false);
-       /* requested_mode is set in probe or by ethtool */
-       if (!pf->fc_autoneg_status)
-               goto no_autoneg;
-
-       if ((pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) &&
-           (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX))
-               pf->hw.fc.current_mode = I40E_FC_FULL;
-       else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
-               pf->hw.fc.current_mode = I40E_FC_TX_PAUSE;
-       else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
-               pf->hw.fc.current_mode = I40E_FC_RX_PAUSE;
-       else
-               pf->hw.fc.current_mode = I40E_FC_NONE;
 
-       /* sync the flow control settings with the auto-neg values */
-       switch (pf->hw.fc.current_mode) {
-       case I40E_FC_FULL:
-               txfc = 1;
-               rxfc = 1;
-               break;
-       case I40E_FC_TX_PAUSE:
-               txfc = 1;
-               rxfc = 0;
-               break;
-       case I40E_FC_RX_PAUSE:
-               txfc = 0;
-               rxfc = 1;
-               break;
-       case I40E_FC_NONE:
-       case I40E_FC_DEFAULT:
-               txfc = 0;
-               rxfc = 0;
-               break;
-       case I40E_FC_PFC:
-               /* TBD */
-               break;
-       /* no default case, we have to handle all possibilities here */
-       }
-
-       wr32(&pf->hw, I40E_PRTDCB_FCCFG, txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
-
-       rxfc_reg = rd32(&pf->hw, I40E_PRTDCB_MFLCN) &
-                  ~I40E_PRTDCB_MFLCN_RFCE_MASK;
-       rxfc_reg |= (rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT);
-
-       wr32(&pf->hw, I40E_PRTDCB_MFLCN, rxfc_reg);
-
-       goto fc_complete;
-
-no_autoneg:
-       /* disable L2 flow control, user can turn it on if they wish */
-       wr32(&pf->hw, I40E_PRTDCB_FCCFG, 0);
-       wr32(&pf->hw, I40E_PRTDCB_MFLCN, rd32(&pf->hw, I40E_PRTDCB_MFLCN) &
-                                        ~I40E_PRTDCB_MFLCN_RFCE_MASK);
-
-fc_complete:
        i40e_ptp_init(pf);
 
        return ret;
@@ -8226,6 +8733,9 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
        int queues_left;
 
        pf->num_lan_qps = 0;
+#ifdef I40E_FCOE
+       pf->num_fcoe_qps = 0;
+#endif
 
        /* Find the max queues to be put into basic use.  We'll always be
         * using TC0, whether or not DCB is running, and TC0 will get the
@@ -8241,6 +8751,9 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
 
                /* make sure all the fancies are disabled */
                pf->flags &= ~(I40E_FLAG_RSS_ENABLED    |
+#ifdef I40E_FCOE
+                              I40E_FLAG_FCOE_ENABLED   |
+#endif
                               I40E_FLAG_FD_SB_ENABLED  |
                               I40E_FLAG_FD_ATR_ENABLED |
                               I40E_FLAG_DCB_CAPABLE    |
@@ -8255,6 +8768,9 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
                queues_left -= pf->num_lan_qps;
 
                pf->flags &= ~(I40E_FLAG_RSS_ENABLED    |
+#ifdef I40E_FCOE
+                              I40E_FLAG_FCOE_ENABLED   |
+#endif
                               I40E_FLAG_FD_SB_ENABLED  |
                               I40E_FLAG_FD_ATR_ENABLED |
                               I40E_FLAG_DCB_ENABLED    |
@@ -8270,6 +8786,22 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
                queues_left -= pf->num_lan_qps;
        }
 
+#ifdef I40E_FCOE
+       if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
+               if (I40E_DEFAULT_FCOE <= queues_left) {
+                       pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
+               } else if (I40E_MINIMUM_FCOE <= queues_left) {
+                       pf->num_fcoe_qps = I40E_MINIMUM_FCOE;
+               } else {
+                       pf->num_fcoe_qps = 0;
+                       pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
+                       dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n");
+               }
+
+               queues_left -= pf->num_fcoe_qps;
+       }
+
+#endif
        if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
                if (queues_left > 1) {
                        queues_left -= 1; /* save 1 queue for FD */
@@ -8294,6 +8826,9 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
        }
 
        pf->queues_left = queues_left;
+#ifdef I40E_FCOE
+       dev_info(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
+#endif
 }
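
The FCoE branch in i40e_determine_queue_usage() above takes its queue pairs from the shared budget, trying the default count first, falling back to the minimum, and disabling the feature if neither fits. A short sketch of that fallback order, with placeholder constants standing in for I40E_DEFAULT_FCOE and I40E_MINIMUM_FCOE:

/* Queue-budget fallback: default count, then minimum, else disable.
 * The constants are placeholders, not the driver's real values.
 */
#include <stdio.h>

#define DEFAULT_FCOE 8   /* placeholder */
#define MINIMUM_FCOE 2   /* placeholder */

int main(void)
{
        int queues_left = 5;      /* pretend budget after LAN/DCB took theirs */
        int fcoe_enabled = 1;
        int num_fcoe_qps;

        if (DEFAULT_FCOE <= queues_left) {
                num_fcoe_qps = DEFAULT_FCOE;
        } else if (MINIMUM_FCOE <= queues_left) {
                num_fcoe_qps = MINIMUM_FCOE;
        } else {
                num_fcoe_qps = 0;
                fcoe_enabled = 0; /* not enough queues: feature disabled */
        }
        queues_left -= num_fcoe_qps;

        printf("fcoe_qps=%d enabled=%d queues_left=%d\n",
               num_fcoe_qps, fcoe_enabled, queues_left);
        return 0;
}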
 
 /**
@@ -8360,6 +8895,10 @@ static void i40e_print_features(struct i40e_pf *pf)
                buf += sprintf(buf, "DCB ");
        if (pf->flags & I40E_FLAG_PTP)
                buf += sprintf(buf, "PTP ");
+#ifdef I40E_FCOE
+       if (pf->flags & I40E_FLAG_FCOE_ENABLED)
+               buf += sprintf(buf, "FCOE ");
+#endif
 
        BUG_ON(buf > (string + INFO_STRING_LEN));
        dev_info(&pf->pdev->dev, "%s\n", string);
@@ -8460,6 +8999,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        /* Reset here to make sure all is clean and to define PF 'n' */
+       i40e_clear_hw(hw);
        err = i40e_pf_reset(hw);
        if (err) {
                dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
@@ -8489,12 +9029,20 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
        if (err) {
                dev_info(&pdev->dev,
-                        "init_adminq failed: %d expecting API %02x.%02x\n",
-                        err,
-                        I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR);
+                        "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
                goto err_pf_reset;
        }
 
+       if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+           hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
+               dev_info(&pdev->dev,
+                        "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
+       else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
+                hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
+               dev_info(&pdev->dev,
+                        "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+
+
        i40e_verify_eeprom(pf);
 
        /* Rev 0 hardware was never productized */
@@ -8535,6 +9083,21 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
        dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
        ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
+       i40e_get_port_mac_addr(hw, hw->mac.port_addr);
+       if (is_valid_ether_addr(hw->mac.port_addr))
+               pf->flags |= I40E_FLAG_PORT_ID_VALID;
+#ifdef I40E_FCOE
+       err = i40e_get_san_mac_addr(hw, hw->mac.san_addr);
+       if (err)
+               dev_info(&pdev->dev,
+                        "(non-fatal) SAN MAC retrieval failed: %d\n", err);
+       if (!is_valid_ether_addr(hw->mac.san_addr)) {
+               dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n",
+                        hw->mac.san_addr);
+               ether_addr_copy(hw->mac.san_addr, hw->mac.addr);
+       }
+       dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr);
+#endif /* I40E_FCOE */
 
        pci_set_drvdata(pdev, pf);
        pci_save_state(pdev);
@@ -8651,6 +9214,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        mod_timer(&pf->service_timer,
                  round_jiffies(jiffies + pf->service_timer_period));
 
+#ifdef I40E_FCOE
+       /* create FCoE interface */
+       i40e_fcoe_vsi_setup(pf);
+
+#endif
        /* Get the negotiated link width and speed from PCI config space */
        pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status);
 
@@ -8722,7 +9290,6 @@ static void i40e_remove(struct pci_dev *pdev)
 {
        struct i40e_pf *pf = pci_get_drvdata(pdev);
        i40e_status ret_code;
-       u32 reg;
        int i;
 
        i40e_dbg_pf_exit(pf);
@@ -8800,11 +9367,6 @@ static void i40e_remove(struct pci_dev *pdev)
        kfree(pf->irq_pile);
        kfree(pf->vsi);
 
-       /* force a PF reset to clean anything leftover */
-       reg = rd32(&pf->hw, I40E_PFGEN_CTRL);
-       wr32(&pf->hw, I40E_PFGEN_CTRL, (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
-       i40e_flush(&pf->hw);
-
        iounmap(pf->hw.hw_addr);
        kfree(pf);
        pci_release_selected_regions(pdev,