Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author David S. Miller <davem@davemloft.net>
Tue, 9 Jun 2015 03:06:56 +0000 (20:06 -0700)
committer David S. Miller <davem@davemloft.net>
Tue, 9 Jun 2015 03:06:56 +0000 (20:06 -0700)
13 files changed:
MAINTAINERS
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_ethtool.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
lib/rhashtable.c
net/bridge/br_fdb.c
net/core/dev.c

diff --combined MAINTAINERS
index 3a4b7cbbaa72e1446688e4d14e1dae68a7f5b1bc,d8afd29536786b0907c795afeb8cd5aa3ff2d3b6..5bb0b9e3059f8ed3526133a774495c33da17a6b0
@@@ -51,9 -51,9 +51,9 @@@ trivial patch so apply some common sens
        or does something very odd once a month document it.
  
        PLEASE remember that submissions must be made under the terms
-       of the OSDL certificate of contribution and should include a
-       Signed-off-by: line.  The current version of this "Developer's
-       Certificate of Origin" (DCO) is listed in the file
+       of the Linux Foundation certificate of contribution and should
+       include a Signed-off-by: line.  The current version of this
+       "Developer's Certificate of Origin" (DCO) is listed in the file
        Documentation/SubmittingPatches.
  
  6.    Make sure you have the right to send any changes you make. If you
@@@ -652,6 -652,7 +652,6 @@@ M: Tom Lendacky <thomas.lendacky@amd.co
  L:    netdev@vger.kernel.org
  S:    Supported
  F:    drivers/net/ethernet/amd/xgbe/
 -F:    drivers/net/phy/amd-xgbe-phy.c
  
  AMS (Apple Motion Sensor) DRIVER
  M:    Michael Hanselmann <linux-kernel@hansmi.ch>
@@@ -921,13 -922,6 +921,13 @@@ M:       Krzysztof Halasa <khalasa@piap.pl
  S:    Maintained
  F:    arch/arm/mach-cns3xxx/
  
 +ARM/CAVIUM THUNDER NETWORK DRIVER
 +M:    Sunil Goutham <sgoutham@cavium.com>
 +M:    Robert Richter <rric@kernel.org>
 +L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 +S:    Supported
 +F:    drivers/net/ethernet/cavium/
 +
  ARM/CIRRUS LOGIC CLPS711X ARM ARCHITECTURE
  M:    Alexander Shiyan <shc_work@mail.ru>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@@ -6372,12 -6366,6 +6372,12 @@@ F:    include/uapi/linux/meye.
  F:    include/uapi/linux/ivtv*
  F:    include/uapi/linux/uvcvideo.h
  
 +MEDIATEK MT7601U WIRELESS LAN DRIVER
 +M:    Jakub Kicinski <kubakici@wp.pl>
 +L:    linux-wireless@vger.kernel.org
 +S:    Maintained
 +F:    drivers/net/wireless/mediatek/mt7601u/
 +
  MEGARAID SCSI/SAS DRIVERS
  M:    Kashyap Desai <kashyap.desai@avagotech.com>
  M:    Sumit Saxena <sumit.saxena@avagotech.com>
@@@ -7587,6 -7575,7 +7587,7 @@@ F:      drivers/pci/host/pci-exynos.
  
  PCI DRIVER FOR SYNOPSIS DESIGNWARE
  M:    Jingoo Han <jingoohan1@gmail.com>
+ M:    Pratyush Anand <pratyush.anand@gmail.com>
  L:    linux-pci@vger.kernel.org
  S:    Maintained
  F:    drivers/pci/host/*designware*
@@@ -7600,8 -7589,9 +7601,9 @@@ F:      Documentation/devicetree/bindings/pc
  F:    drivers/pci/host/pci-host-generic.c
  
  PCIE DRIVER FOR ST SPEAR13XX
+ M:    Pratyush Anand <pratyush.anand@gmail.com>
  L:    linux-pci@vger.kernel.org
- S:    Orphan
+ S:    Maintained
  F:    drivers/pci/host/*spear*
  
  PCMCIA SUBSYSTEM
index c752049097029588f8cf5446417010a9afde5d55,9fd6c69a8bac3c77d1c0c6e99eb4f3644561f78a..1e9c28d19ef88ccd4e0c8b45bf7ee9351c03935c
@@@ -183,10 -183,9 +183,10 @@@ static int xgbe_alloc_channels(struct x
                        channel->rx_ring = rx_ring++;
                }
  
 -              DBGPR("  %s: queue=%u, dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
 -                    channel->name, channel->queue_index, channel->dma_regs,
 -                    channel->dma_irq, channel->tx_ring, channel->rx_ring);
 +              netif_dbg(pdata, drv, pdata->netdev,
 +                        "%s: dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
 +                        channel->name, channel->dma_regs, channel->dma_irq,
 +                        channel->tx_ring, channel->rx_ring);
        }
  
        pdata->channel = channel_mem;
@@@ -236,8 -235,7 +236,8 @@@ static int xgbe_maybe_stop_tx_queue(str
        struct xgbe_prv_data *pdata = channel->pdata;
  
        if (count > xgbe_tx_avail_desc(ring)) {
 -              DBGPR("  Tx queue stopped, not enough descriptors available\n");
 +              netif_info(pdata, drv, pdata->netdev,
 +                         "Tx queue stopped, not enough descriptors available\n");
                netif_stop_subqueue(pdata->netdev, channel->queue_index);
                ring->tx.queue_stopped = 1;
  
@@@ -332,7 -330,7 +332,7 @@@ static irqreturn_t xgbe_isr(int irq, vo
        if (!dma_isr)
                goto isr_done;
  
 -      DBGPR("  DMA_ISR = %08x\n", dma_isr);
 +      netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);
  
        for (i = 0; i < pdata->channel_count; i++) {
                if (!(dma_isr & (1 << i)))
                channel = pdata->channel + i;
  
                dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
 -              DBGPR("  DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);
 +              netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
 +                        i, dma_ch_isr);
  
                /* The TI or RI interrupt bits may still be set even if using
                 * per channel DMA interrupts. Check to be sure those are not
                }
        }
  
 -      DBGPR("  DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR));
 -
  isr_done:
        return IRQ_HANDLED;
  }
@@@ -424,7 -423,7 +424,7 @@@ static void xgbe_tx_timer(unsigned lon
        if (napi_schedule_prep(napi)) {
                /* Disable Tx and Rx interrupts */
                if (pdata->per_channel_irq)
-                       disable_irq(channel->dma_irq);
+                       disable_irq_nosync(channel->dma_irq);
                else
                        xgbe_disable_rx_tx_ints(pdata);
  
        DBGPR("<--xgbe_tx_timer\n");
  }
  
 -static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
 +static void xgbe_service(struct work_struct *work)
 +{
 +      struct xgbe_prv_data *pdata = container_of(work,
 +                                                 struct xgbe_prv_data,
 +                                                 service_work);
 +
 +      pdata->phy_if.phy_status(pdata);
 +}
 +
 +static void xgbe_service_timer(unsigned long data)
 +{
 +      struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
 +
 +      schedule_work(&pdata->service_work);
 +
 +      mod_timer(&pdata->service_timer, jiffies + HZ);
 +}
 +
 +static void xgbe_init_timers(struct xgbe_prv_data *pdata)
  {
        struct xgbe_channel *channel;
        unsigned int i;
  
 -      DBGPR("-->xgbe_init_tx_timers\n");
 +      setup_timer(&pdata->service_timer, xgbe_service_timer,
 +                  (unsigned long)pdata);
  
        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->tx_ring)
                        break;
  
 -              DBGPR("  %s adding tx timer\n", channel->name);
                setup_timer(&channel->tx_timer, xgbe_tx_timer,
                            (unsigned long)channel);
        }
 +}
  
 -      DBGPR("<--xgbe_init_tx_timers\n");
 +static void xgbe_start_timers(struct xgbe_prv_data *pdata)
 +{
 +      mod_timer(&pdata->service_timer, jiffies + HZ);
  }
  
 -static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata)
 +static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
  {
        struct xgbe_channel *channel;
        unsigned int i;
  
 -      DBGPR("-->xgbe_stop_tx_timers\n");
 +      del_timer_sync(&pdata->service_timer);
  
        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->tx_ring)
                        break;
  
 -              DBGPR("  %s deleting tx timer\n", channel->name);
                del_timer_sync(&channel->tx_timer);
        }
 -
 -      DBGPR("<--xgbe_stop_tx_timers\n");
  }
  
  void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
                                                RXFIFOSIZE);
        hw_feat->tx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
                                                TXFIFOSIZE);
 +      hw_feat->adv_ts_hi     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADVTHWORD);
        hw_feat->dma_width     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
        hw_feat->dcb           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
        hw_feat->sph           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
@@@ -779,12 -759,112 +779,12 @@@ static void xgbe_free_rx_data(struct xg
        DBGPR("<--xgbe_free_rx_data\n");
  }
  
 -static void xgbe_adjust_link(struct net_device *netdev)
 -{
 -      struct xgbe_prv_data *pdata = netdev_priv(netdev);
 -      struct xgbe_hw_if *hw_if = &pdata->hw_if;
 -      struct phy_device *phydev = pdata->phydev;
 -      int new_state = 0;
 -
 -      if (!phydev)
 -              return;
 -
 -      if (phydev->link) {
 -              /* Flow control support */
 -              if (pdata->pause_autoneg) {
 -                      if (phydev->pause || phydev->asym_pause) {
 -                              pdata->tx_pause = 1;
 -                              pdata->rx_pause = 1;
 -                      } else {
 -                              pdata->tx_pause = 0;
 -                              pdata->rx_pause = 0;
 -                      }
 -              }
 -
 -              if (pdata->tx_pause != pdata->phy_tx_pause) {
 -                      hw_if->config_tx_flow_control(pdata);
 -                      pdata->phy_tx_pause = pdata->tx_pause;
 -              }
 -
 -              if (pdata->rx_pause != pdata->phy_rx_pause) {
 -                      hw_if->config_rx_flow_control(pdata);
 -                      pdata->phy_rx_pause = pdata->rx_pause;
 -              }
 -
 -              /* Speed support */
 -              if (phydev->speed != pdata->phy_speed) {
 -                      new_state = 1;
 -
 -                      switch (phydev->speed) {
 -                      case SPEED_10000:
 -                              hw_if->set_xgmii_speed(pdata);
 -                              break;
 -
 -                      case SPEED_2500:
 -                              hw_if->set_gmii_2500_speed(pdata);
 -                              break;
 -
 -                      case SPEED_1000:
 -                              hw_if->set_gmii_speed(pdata);
 -                              break;
 -                      }
 -                      pdata->phy_speed = phydev->speed;
 -              }
 -
 -              if (phydev->link != pdata->phy_link) {
 -                      new_state = 1;
 -                      pdata->phy_link = 1;
 -              }
 -      } else if (pdata->phy_link) {
 -              new_state = 1;
 -              pdata->phy_link = 0;
 -              pdata->phy_speed = SPEED_UNKNOWN;
 -      }
 -
 -      if (new_state)
 -              phy_print_status(phydev);
 -}
 -
  static int xgbe_phy_init(struct xgbe_prv_data *pdata)
  {
 -      struct net_device *netdev = pdata->netdev;
 -      struct phy_device *phydev = pdata->phydev;
 -      int ret;
 -
        pdata->phy_link = -1;
        pdata->phy_speed = SPEED_UNKNOWN;
 -      pdata->phy_tx_pause = pdata->tx_pause;
 -      pdata->phy_rx_pause = pdata->rx_pause;
  
 -      ret = phy_connect_direct(netdev, phydev, &xgbe_adjust_link,
 -                               pdata->phy_mode);
 -      if (ret) {
 -              netdev_err(netdev, "phy_connect_direct failed\n");
 -              return ret;
 -      }
 -
 -      if (!phydev->drv || (phydev->drv->phy_id == 0)) {
 -              netdev_err(netdev, "phy_id not valid\n");
 -              ret = -ENODEV;
 -              goto err_phy_connect;
 -      }
 -      DBGPR("  phy_connect_direct succeeded for PHY %s, link=%d\n",
 -            dev_name(&phydev->dev), phydev->link);
 -
 -      return 0;
 -
 -err_phy_connect:
 -      phy_disconnect(phydev);
 -
 -      return ret;
 -}
 -
 -static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
 -{
 -      if (!pdata->phydev)
 -              return;
 -
 -      phy_disconnect(pdata->phydev);
 +      return pdata->phy_if.phy_reset(pdata);
  }
  
  int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
  
        netif_tx_stop_all_queues(netdev);
  
 +      xgbe_stop_timers(pdata);
 +      flush_workqueue(pdata->dev_workqueue);
 +
        hw_if->powerdown_tx(pdata);
        hw_if->powerdown_rx(pdata);
  
        xgbe_napi_disable(pdata, 0);
  
 -      phy_stop(pdata->phydev);
 -
        pdata->power_down = 1;
  
        spin_unlock_irqrestore(&pdata->lock, flags);
@@@ -845,6 -924,8 +845,6 @@@ int xgbe_powerup(struct net_device *net
  
        pdata->power_down = 0;
  
 -      phy_start(pdata->phydev);
 -
        xgbe_napi_enable(pdata, 0);
  
        hw_if->powerup_tx(pdata);
  
        netif_tx_start_all_queues(netdev);
  
 +      xgbe_start_timers(pdata);
 +
        spin_unlock_irqrestore(&pdata->lock, flags);
  
        DBGPR("<--xgbe_powerup\n");
  static int xgbe_start(struct xgbe_prv_data *pdata)
  {
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
 +      struct xgbe_phy_if *phy_if = &pdata->phy_if;
        struct net_device *netdev = pdata->netdev;
        int ret;
  
  
        hw_if->init(pdata);
  
 -      phy_start(pdata->phydev);
 +      ret = phy_if->phy_start(pdata);
 +      if (ret)
 +              goto err_phy;
  
        xgbe_napi_enable(pdata, 1);
  
        hw_if->enable_tx(pdata);
        hw_if->enable_rx(pdata);
  
 -      xgbe_init_tx_timers(pdata);
 -
        netif_tx_start_all_queues(netdev);
  
 +      xgbe_start_timers(pdata);
 +      schedule_work(&pdata->service_work);
 +
        DBGPR("<--xgbe_start\n");
  
        return 0;
  err_napi:
        xgbe_napi_disable(pdata, 1);
  
 -      phy_stop(pdata->phydev);
 +      phy_if->phy_stop(pdata);
  
 +err_phy:
        hw_if->exit(pdata);
  
        return ret;
  static void xgbe_stop(struct xgbe_prv_data *pdata)
  {
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
 +      struct xgbe_phy_if *phy_if = &pdata->phy_if;
        struct xgbe_channel *channel;
        struct net_device *netdev = pdata->netdev;
        struct netdev_queue *txq;
  
        netif_tx_stop_all_queues(netdev);
  
 -      xgbe_stop_tx_timers(pdata);
 +      xgbe_stop_timers(pdata);
 +      flush_workqueue(pdata->dev_workqueue);
  
        hw_if->disable_tx(pdata);
        hw_if->disable_rx(pdata);
  
        xgbe_napi_disable(pdata, 1);
  
 -      phy_stop(pdata->phydev);
 +      phy_if->phy_stop(pdata);
  
        hw_if->exit(pdata);
  
@@@ -1302,7 -1374,7 +1302,7 @@@ static int xgbe_open(struct net_device 
        ret = clk_prepare_enable(pdata->sysclk);
        if (ret) {
                netdev_alert(netdev, "dma clk_prepare_enable failed\n");
 -              goto err_phy_init;
 +              return ret;
        }
  
        ret = clk_prepare_enable(pdata->ptpclk);
        if (ret)
                goto err_channels;
  
 -      /* Initialize the device restart and Tx timestamp work struct */
 +      INIT_WORK(&pdata->service_work, xgbe_service);
        INIT_WORK(&pdata->restart_work, xgbe_restart);
        INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
 +      xgbe_init_timers(pdata);
  
        ret = xgbe_start(pdata);
        if (ret)
                goto err_rings;
  
 +      clear_bit(XGBE_DOWN, &pdata->dev_state);
 +
        DBGPR("<--xgbe_open\n");
  
        return 0;
@@@ -1354,6 -1423,9 +1354,6 @@@ err_ptpclk
  err_sysclk:
        clk_disable_unprepare(pdata->sysclk);
  
 -err_phy_init:
 -      xgbe_phy_exit(pdata);
 -
        return ret;
  }
  
@@@ -1377,7 -1449,8 +1377,7 @@@ static int xgbe_close(struct net_devic
        clk_disable_unprepare(pdata->ptpclk);
        clk_disable_unprepare(pdata->sysclk);
  
 -      /* Release the phy */
 -      xgbe_phy_exit(pdata);
 +      set_bit(XGBE_DOWN, &pdata->dev_state);
  
        DBGPR("<--xgbe_close\n");
  
@@@ -1405,8 -1478,7 +1405,8 @@@ static int xgbe_xmit(struct sk_buff *sk
        ret = NETDEV_TX_OK;
  
        if (skb->len == 0) {
 -              netdev_err(netdev, "empty skb received from stack\n");
 +              netif_err(pdata, tx_err, netdev,
 +                        "empty skb received from stack\n");
                dev_kfree_skb_any(skb);
                goto tx_netdev_return;
        }
  
        ret = xgbe_prep_tso(skb, packet);
        if (ret) {
 -              netdev_err(netdev, "error processing TSO packet\n");
 +              netif_err(pdata, tx_err, netdev,
 +                        "error processing TSO packet\n");
                dev_kfree_skb_any(skb);
                goto tx_netdev_return;
        }
        /* Configure required descriptor fields for transmission */
        hw_if->dev_xmit(channel);
  
 -#ifdef XGMAC_ENABLE_TX_PKT_DUMP
 -      xgbe_print_pkt(netdev, skb, true);
 -#endif
 +      if (netif_msg_pktdata(pdata))
 +              xgbe_print_pkt(netdev, skb, true);
  
        /* Stop the queue in advance if there may not be enough descriptors */
        xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);
@@@ -1638,8 -1710,7 +1638,8 @@@ static int xgbe_setup_tc(struct net_dev
                               (pdata->q2tc_map[queue] == i))
                                queue++;
  
 -                      DBGPR("  TC%u using TXq%u-%u\n", i, offset, queue - 1);
 +                      netif_dbg(pdata, drv, netdev, "TC%u using TXq%u-%u\n",
 +                                i, offset, queue - 1);
                        netdev_set_tc_queue(netdev, i, queue - offset, offset);
                        offset = queue;
                }
@@@ -1749,10 -1820,9 +1749,10 @@@ static void xgbe_rx_refresh(struct xgbe
                          lower_32_bits(rdata->rdesc_dma));
  }
  
 -static struct sk_buff *xgbe_create_skb(struct napi_struct *napi,
 +static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
 +                                     struct napi_struct *napi,
                                       struct xgbe_ring_data *rdata,
 -                                     unsigned int *len)
 +                                     unsigned int len)
  {
        struct sk_buff *skb;
        u8 *packet;
        if (!skb)
                return NULL;
  
 +      /* Start with the header buffer which may contain just the header
 +       * or the header plus data
 +       */
 +      dma_sync_single_for_cpu(pdata->dev, rdata->rx.hdr.dma,
 +                              rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);
 +
        packet = page_address(rdata->rx.hdr.pa.pages) +
                 rdata->rx.hdr.pa.pages_offset;
 -      copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : *len;
 +      copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : len;
        copy_len = min(rdata->rx.hdr.dma_len, copy_len);
        skb_copy_to_linear_data(skb, packet, copy_len);
        skb_put(skb, copy_len);
  
 -      *len -= copy_len;
 +      len -= copy_len;
 +      if (len) {
 +              /* Add the remaining data as a frag */
 +              dma_sync_single_for_cpu(pdata->dev, rdata->rx.buf.dma,
 +                                      rdata->rx.buf.dma_len, DMA_FROM_DEVICE);
 +
 +              skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 +                              rdata->rx.buf.pa.pages,
 +                              rdata->rx.buf.pa.pages_offset,
 +                              len, rdata->rx.buf.dma_len);
 +              rdata->rx.buf.pa.pages = NULL;
 +      }
  
        return skb;
  }
@@@ -1824,8 -1877,9 +1824,8 @@@ static int xgbe_tx_poll(struct xgbe_cha
                 * bit */
                dma_rmb();
  
 -#ifdef XGMAC_ENABLE_TX_DESC_DUMP
 -              xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
 -#endif
 +              if (netif_msg_tx_done(pdata))
 +                      xgbe_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);
  
                if (hw_if->is_last_desc(rdesc)) {
                        tx_packets += rdata->tx.packets;
@@@ -1868,7 -1922,7 +1868,7 @@@ static int xgbe_rx_poll(struct xgbe_cha
        struct sk_buff *skb;
        struct skb_shared_hwtstamps *hwtstamps;
        unsigned int incomplete, error, context_next, context;
 -      unsigned int len, put_len, max_len;
 +      unsigned int len, rdesc_len, max_len;
        unsigned int received = 0;
        int packet_count = 0;
  
        if (!ring)
                return 0;
  
 +      incomplete = 0;
 +      context_next = 0;
 +
        napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
  
        rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
  
                /* First time in loop see if we need to restore state */
                if (!received && rdata->state_saved) {
 -                      incomplete = rdata->state.incomplete;
 -                      context_next = rdata->state.context_next;
                        skb = rdata->state.skb;
                        error = rdata->state.error;
                        len = rdata->state.len;
                } else {
                        memset(packet, 0, sizeof(*packet));
 -                      incomplete = 0;
 -                      context_next = 0;
                        skb = NULL;
                        error = 0;
                        len = 0;
@@@ -1928,23 -1983,29 +1928,23 @@@ read_again
  
                if (error || packet->errors) {
                        if (packet->errors)
 -                              DBGPR("Error in received packet\n");
 +                              netif_err(pdata, rx_err, netdev,
 +                                        "error in received packet\n");
                        dev_kfree_skb(skb);
                        goto next_packet;
                }
  
                if (!context) {
 -                      put_len = rdata->rx.len - len;
 -                      len += put_len;
 -
 -                      if (!skb) {
 -                              dma_sync_single_for_cpu(pdata->dev,
 -                                                      rdata->rx.hdr.dma,
 -                                                      rdata->rx.hdr.dma_len,
 -                                                      DMA_FROM_DEVICE);
 -
 -                              skb = xgbe_create_skb(napi, rdata, &put_len);
 -                              if (!skb) {
 +                      /* Length is cumulative, get this descriptor's length */
 +                      rdesc_len = rdata->rx.len - len;
 +                      len += rdesc_len;
 +
 +                      if (rdesc_len && !skb) {
 +                              skb = xgbe_create_skb(pdata, napi, rdata,
 +                                                    rdesc_len);
 +                              if (!skb)
                                        error = 1;
 -                                      goto skip_data;
 -                              }
 -                      }
 -
 -                      if (put_len) {
 +                      } else if (rdesc_len) {
                                dma_sync_single_for_cpu(pdata->dev,
                                                        rdata->rx.buf.dma,
                                                        rdata->rx.buf.dma_len,
                                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                                                rdata->rx.buf.pa.pages,
                                                rdata->rx.buf.pa.pages_offset,
 -                                              put_len, rdata->rx.buf.dma_len);
 +                                              rdesc_len,
 +                                              rdata->rx.buf.dma_len);
                                rdata->rx.buf.pa.pages = NULL;
                        }
                }
  
 -skip_data:
                if (incomplete || context_next)
                        goto read_again;
  
                        max_len += VLAN_HLEN;
  
                if (skb->len > max_len) {
 -                      DBGPR("packet length exceeds configured MTU\n");
 +                      netif_err(pdata, rx_err, netdev,
 +                                "packet length exceeds configured MTU\n");
                        dev_kfree_skb(skb);
                        goto next_packet;
                }
  
 -#ifdef XGMAC_ENABLE_RX_PKT_DUMP
 -              xgbe_print_pkt(netdev, skb, false);
 -#endif
 +              if (netif_msg_pktdata(pdata))
 +                      xgbe_print_pkt(netdev, skb, false);
  
                skb_checksum_none_assert(skb);
                if (XGMAC_GET_BITS(packet->attributes,
                skb_record_rx_queue(skb, channel->queue_index);
                skb_mark_napi_id(skb, napi);
  
 -              netdev->last_rx = jiffies;
                napi_gro_receive(napi, skb);
  
  next_packet:
        if (received && (incomplete || context_next)) {
                rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
                rdata->state_saved = 1;
 -              rdata->state.incomplete = incomplete;
 -              rdata->state.context_next = context_next;
                rdata->state.skb = skb;
                rdata->state.len = len;
                rdata->state.error = error;
@@@ -2101,8 -2165,8 +2101,8 @@@ static int xgbe_all_poll(struct napi_st
        return processed;
  }
  
 -void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
 -                     unsigned int count, unsigned int flag)
 +void xgbe_dump_tx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
 +                     unsigned int idx, unsigned int count, unsigned int flag)
  {
        struct xgbe_ring_data *rdata;
        struct xgbe_ring_desc *rdesc;
        while (count--) {
                rdata = XGBE_GET_DESC_DATA(ring, idx);
                rdesc = rdata->rdesc;
 -              pr_alert("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
 -                       (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
 -                       le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
 -                       le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
 +              netdev_dbg(pdata->netdev,
 +                         "TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
 +                         (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
 +                         le32_to_cpu(rdesc->desc0),
 +                         le32_to_cpu(rdesc->desc1),
 +                         le32_to_cpu(rdesc->desc2),
 +                         le32_to_cpu(rdesc->desc3));
                idx++;
        }
  }
  
 -void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc,
 +void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
                       unsigned int idx)
  {
 -      pr_alert("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
 -               le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
 -               le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
 +      struct xgbe_ring_data *rdata;
 +      struct xgbe_ring_desc *rdesc;
 +
 +      rdata = XGBE_GET_DESC_DATA(ring, idx);
 +      rdesc = rdata->rdesc;
 +      netdev_dbg(pdata->netdev,
 +                 "RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n",
 +                 idx, le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
 +                 le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
  }
  
  void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
        unsigned char buffer[128];
        unsigned int i, j;
  
 -      netdev_alert(netdev, "\n************** SKB dump ****************\n");
 +      netdev_dbg(netdev, "\n************** SKB dump ****************\n");
  
 -      netdev_alert(netdev, "%s packet of %d bytes\n",
 -                   (tx_rx ? "TX" : "RX"), skb->len);
 +      netdev_dbg(netdev, "%s packet of %d bytes\n",
 +                 (tx_rx ? "TX" : "RX"), skb->len);
  
 -      netdev_alert(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
 -      netdev_alert(netdev, "Src MAC addr: %pM\n", eth->h_source);
 -      netdev_alert(netdev, "Protocol: 0x%04hx\n", ntohs(eth->h_proto));
 +      netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
 +      netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
 +      netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
  
        for (i = 0, j = 0; i < skb->len;) {
                j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
                              buf[i++]);
  
                if ((i % 32) == 0) {
 -                      netdev_alert(netdev, "  0x%04x: %s\n", i - 32, buffer);
 +                      netdev_dbg(netdev, "  %#06x: %s\n", i - 32, buffer);
                        j = 0;
                } else if ((i % 16) == 0) {
                        buffer[j++] = ' ';
                }
        }
        if (i % 32)
 -              netdev_alert(netdev, "  0x%04x: %s\n", i - (i % 32), buffer);
 +              netdev_dbg(netdev, "  %#06x: %s\n", i - (i % 32), buffer);
  
 -      netdev_alert(netdev, "\n************** SKB dump ****************\n");
 +      netdev_dbg(netdev, "\n************** SKB dump ****************\n");
  }
index 41150543906a4505aad66b85dedc9be82fc1f1af,c5e1d0ac75f909f843dd0397ad41b85eeb26a164..9eac3227d2cabc15c2d21a4baafafc3761372560
@@@ -1,5 -1,5 +1,5 @@@
  /*
 - * Copyright (C) 2005 - 2014 Emulex
 + * Copyright (C) 2005 - 2015 Emulex
   * All rights reserved.
   *
   * This program is free software; you can redistribute it and/or
@@@ -93,7 -93,7 +93,7 @@@ static void be_mcc_notify(struct be_ada
        struct be_queue_info *mccq = &adapter->mcc_obj.q;
        u32 val = 0;
  
 -      if (be_error(adapter))
 +      if (be_check_error(adapter, BE_ERROR_ANY))
                return;
  
        val |= mccq->id & DB_MCCQ_RING_ID_MASK;
@@@ -140,7 -140,6 +140,7 @@@ static bool be_skip_err_log(u8 opcode, 
        if (base_status == MCC_STATUS_NOT_SUPPORTED ||
            base_status == MCC_STATUS_ILLEGAL_REQUEST ||
            addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
 +          addl_status == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
            (opcode == OPCODE_COMMON_WRITE_FLASHROM &&
            (base_status == MCC_STATUS_ILLEGAL_FIELD ||
             addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
@@@ -192,12 -191,10 +192,12 @@@ static void be_async_cmd_process(struc
                if (base_status == MCC_STATUS_SUCCESS) {
                        struct be_cmd_resp_get_cntl_addnl_attribs *resp =
                                                        (void *)resp_hdr;
 -                      adapter->drv_stats.be_on_die_temperature =
 +                      adapter->hwmon_info.be_on_die_temp =
                                                resp->on_die_temperature;
                } else {
                        adapter->be_get_temp_freq = 0;
 +                      adapter->hwmon_info.be_on_die_temp =
 +                                              BE_INVALID_DIE_TEMP;
                }
                return;
        }
@@@ -333,21 -330,6 +333,21 @@@ static void be_async_grp5_pvid_state_pr
        }
  }
  
 +#define MGMT_ENABLE_MASK      0x4
 +static void be_async_grp5_fw_control_process(struct be_adapter *adapter,
 +                                           struct be_mcc_compl *compl)
 +{
 +      struct be_async_fw_control *evt = (struct be_async_fw_control *)compl;
 +      u32 evt_dw1 = le32_to_cpu(evt->event_data_word1);
 +
 +      if (evt_dw1 & MGMT_ENABLE_MASK) {
 +              adapter->flags |= BE_FLAGS_OS2BMC;
 +              adapter->bmc_filt_mask = le32_to_cpu(evt->event_data_word2);
 +      } else {
 +              adapter->flags &= ~BE_FLAGS_OS2BMC;
 +      }
 +}
 +
  static void be_async_grp5_evt_process(struct be_adapter *adapter,
                                      struct be_mcc_compl *compl)
  {
        case ASYNC_EVENT_PVID_STATE:
                be_async_grp5_pvid_state_process(adapter, compl);
                break;
 +      /* Async event to disable/enable os2bmc and/or mac-learning */
 +      case ASYNC_EVENT_FW_CONTROL:
 +              be_async_grp5_fw_control_process(adapter, compl);
 +              break;
        default:
                break;
        }
@@@ -508,7 -486,7 +508,7 @@@ static int be_mcc_wait_compl(struct be_
        struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
  
        for (i = 0; i < mcc_timeout; i++) {
 -              if (be_error(adapter))
 +              if (be_check_error(adapter, BE_ERROR_ANY))
                        return -EIO;
  
                local_bh_disable();
        }
        if (i == mcc_timeout) {
                dev_err(&adapter->pdev->dev, "FW not responding\n");
 -              adapter->fw_timeout = true;
 +              be_set_error(adapter, BE_ERROR_FW);
                return -EIO;
        }
        return status;
@@@ -560,7 -538,7 +560,7 @@@ static int be_mbox_db_ready_wait(struc
        u32 ready;
  
        do {
 -              if (be_error(adapter))
 +              if (be_check_error(adapter, BE_ERROR_ANY))
                        return -EIO;
  
                ready = ioread32(db);
  
                if (msecs > 4000) {
                        dev_err(&adapter->pdev->dev, "FW not responding\n");
 -                      adapter->fw_timeout = true;
 +                      be_set_error(adapter, BE_ERROR_FW);
                        be_detect_error(adapter);
                        return -1;
                }
@@@ -1479,7 -1457,7 +1479,7 @@@ int be_cmd_if_create(struct be_adapter 
                *if_handle = le32_to_cpu(resp->interface_id);
  
                /* Hack to retrieve VF's pmac-id on BE3 */
 -              if (BE3_chip(adapter) && !be_physfn(adapter))
 +              if (BE3_chip(adapter) && be_virtfn(adapter))
                        adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
        }
        return status;
@@@ -1742,9 -1720,9 +1742,9 @@@ int be_cmd_get_regs(struct be_adapter *
        total_size = buf_len;
  
        get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
-       get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
-                                             get_fat_cmd.size,
-                                             &get_fat_cmd.dma);
+       get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+                                            get_fat_cmd.size,
+                                            &get_fat_cmd.dma, GFP_ATOMIC);
        if (!get_fat_cmd.va) {
                dev_err(&adapter->pdev->dev,
                        "Memory allocation failure while reading FAT data\n");
                log_offset += buf_size;
        }
  err:
-       pci_free_consistent(adapter->pdev, get_fat_cmd.size,
-                           get_fat_cmd.va, get_fat_cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
+                         get_fat_cmd.va, get_fat_cmd.dma);
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
  }
@@@ -2237,12 -2215,12 +2237,12 @@@ int be_cmd_read_port_transceiver_data(s
                return -EINVAL;
  
        cmd.size = sizeof(struct be_cmd_resp_port_type);
-       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+       cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+                                    GFP_ATOMIC);
        if (!cmd.va) {
                dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
                return -ENOMEM;
        }
-       memset(cmd.va, 0, cmd.size);
  
        spin_lock_bh(&adapter->mcc_lock);
  
        }
  err:
        spin_unlock_bh(&adapter->mcc_lock);
-       pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
        return status;
  }
  
@@@ -2742,7 -2720,8 +2742,8 @@@ int be_cmd_get_phy_info(struct be_adapt
                goto err;
        }
        cmd.size = sizeof(struct be_cmd_req_get_phy_info);
-       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+       cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+                                    GFP_ATOMIC);
        if (!cmd.va) {
                dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
                status = -ENOMEM;
                                BE_SUPPORTED_SPEED_1GBPS;
                }
        }
-       pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
  err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
@@@ -2827,8 -2806,9 +2828,9 @@@ int be_cmd_get_cntl_attributes(struct b
  
        memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
        attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
-       attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
-                                             &attribs_cmd.dma);
+       attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+                                            attribs_cmd.size,
+                                            &attribs_cmd.dma, GFP_ATOMIC);
        if (!attribs_cmd.va) {
                dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
                status = -ENOMEM;
  err:
        mutex_unlock(&adapter->mbox_lock);
        if (attribs_cmd.va)
-               pci_free_consistent(adapter->pdev, attribs_cmd.size,
-                                   attribs_cmd.va, attribs_cmd.dma);
+               dma_free_coherent(&adapter->pdev->dev, attribs_cmd.size,
+                                 attribs_cmd.va, attribs_cmd.dma);
        return status;
  }
  
@@@ -2994,9 -2974,10 +2996,10 @@@ int be_cmd_get_mac_from_list(struct be_
  
        memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
        get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
-       get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
-                                                  get_mac_list_cmd.size,
-                                                  &get_mac_list_cmd.dma);
+       get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+                                                 get_mac_list_cmd.size,
+                                                 &get_mac_list_cmd.dma,
+                                                 GFP_ATOMIC);
  
        if (!get_mac_list_cmd.va) {
                dev_err(&adapter->pdev->dev,
  
  out:
        spin_unlock_bh(&adapter->mcc_lock);
-       pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
-                           get_mac_list_cmd.va, get_mac_list_cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
+                         get_mac_list_cmd.va, get_mac_list_cmd.dma);
        return status;
  }
  
@@@ -3123,8 -3104,8 +3126,8 @@@ int be_cmd_set_mac_list(struct be_adapt
  
        memset(&cmd, 0, sizeof(struct be_dma_mem));
        cmd.size = sizeof(struct be_cmd_req_set_mac_list);
-       cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
-                                   &cmd.dma, GFP_KERNEL);
+       cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+                                    GFP_KERNEL);
        if (!cmd.va)
                return -ENOMEM;
  
@@@ -3175,7 -3156,7 +3178,7 @@@ int be_cmd_set_mac(struct be_adapter *a
  }
  
  int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
 -                        u32 domain, u16 intf_id, u16 hsw_mode)
 +                        u32 domain, u16 intf_id, u16 hsw_mode, u8 spoofchk)
  {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_set_hsw_config *req;
                              ctxt, hsw_mode);
        }
  
 +      /* Enable/disable both mac and vlan spoof checking */
 +      if (!BEx_chip(adapter) && spoofchk) {
 +              AMAP_SET_BITS(struct amap_set_hsw_context, mac_spoofchk,
 +                            ctxt, spoofchk);
 +              AMAP_SET_BITS(struct amap_set_hsw_context, vlan_spoofchk,
 +                            ctxt, spoofchk);
 +      }
 +
        be_dws_cpu_to_le(req->context, sizeof(req->context));
        status = be_mcc_notify_wait(adapter);
  
@@@ -3229,7 -3202,7 +3232,7 @@@ err
  
  /* Get Hyper switch config */
  int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
 -                        u32 domain, u16 intf_id, u8 *mode)
 +                        u32 domain, u16 intf_id, u8 *mode, bool *spoofchk)
  {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_hsw_config *req;
                if (mode)
                        *mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
                                              port_fwd_type, &resp->context);
 +              if (spoofchk)
 +                      *spoofchk =
 +                              AMAP_GET_BITS(struct amap_get_hsw_resp_context,
 +                                            spoofchk, &resp->context);
        }
  
  err:
@@@ -3292,7 -3261,7 +3295,7 @@@ static bool be_is_wol_excluded(struct b
  {
        struct pci_dev *pdev = adapter->pdev;
  
 -      if (!be_physfn(adapter))
 +      if (be_virtfn(adapter))
                return true;
  
        switch (pdev->subsystem_device) {
@@@ -3325,7 -3294,8 +3328,8 @@@ int be_cmd_get_acpi_wol_cap(struct be_a
  
        memset(&cmd, 0, sizeof(struct be_dma_mem));
        cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
-       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+       cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+                                    GFP_ATOMIC);
        if (!cmd.va) {
                dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
                status = -ENOMEM;
  err:
        mutex_unlock(&adapter->mbox_lock);
        if (cmd.va)
-               pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+               dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+                                 cmd.dma);
        return status;
  
  }
@@@ -3374,8 -3345,9 +3379,9 @@@ int be_cmd_set_fw_log_level(struct be_a
  
        memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
        extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
-       extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
-                                            &extfat_cmd.dma);
+       extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+                                           extfat_cmd.size, &extfat_cmd.dma,
+                                           GFP_ATOMIC);
        if (!extfat_cmd.va)
                return -ENOMEM;
  
  
        status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
  err:
-       pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
-                           extfat_cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
+                         extfat_cmd.dma);
        return status;
  }
  
@@@ -3411,8 -3383,9 +3417,9 @@@ int be_cmd_get_fw_log_level(struct be_a
  
        memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
        extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
-       extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
-                                            &extfat_cmd.dma);
+       extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+                                           extfat_cmd.size, &extfat_cmd.dma,
+                                           GFP_ATOMIC);
  
        if (!extfat_cmd.va) {
                dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
                                level = cfgs->module[0].trace_lvl[j].dbg_lvl;
                }
        }
-       pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
-                           extfat_cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
+                         extfat_cmd.dma);
  err:
        return level;
  }
@@@ -3629,7 -3602,8 +3636,8 @@@ int be_cmd_get_func_config(struct be_ad
  
        memset(&cmd, 0, sizeof(struct be_dma_mem));
        cmd.size = sizeof(struct be_cmd_resp_get_func_config);
-       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+       cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+                                    GFP_ATOMIC);
        if (!cmd.va) {
                dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
                status = -ENOMEM;
  err:
        mutex_unlock(&adapter->mbox_lock);
        if (cmd.va)
-               pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+               dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+                                 cmd.dma);
        return status;
  }
  
@@@ -3690,7 -3665,8 +3699,8 @@@ int be_cmd_get_profile_config(struct be
  
        memset(&cmd, 0, sizeof(struct be_dma_mem));
        cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
-       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+       cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+                                    GFP_ATOMIC);
        if (!cmd.va)
                return -ENOMEM;
  
                res->vf_if_cap_flags = vf_res->cap_flags;
  err:
        if (cmd.va)
-               pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+               dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+                                 cmd.dma);
        return status;
  }
  
@@@ -3751,7 -3728,8 +3762,8 @@@ static int be_cmd_set_profile_config(st
  
        memset(&cmd, 0, sizeof(struct be_dma_mem));
        cmd.size = sizeof(struct be_cmd_req_set_profile_config);
-       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+       cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+                                    GFP_ATOMIC);
        if (!cmd.va)
                return -ENOMEM;
  
        status = be_cmd_notify_wait(adapter, &wrb);
  
        if (cmd.va)
-               pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+               dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+                                 cmd.dma);
        return status;
  }
  
index 675cbacef772d20fea8e992e51dd134c06bff346,2835dee5dc3930cc5d1d09ec958bd2557228a2cd..b2476dbfd103120affb5e216a31d304dda570a67
@@@ -1,5 -1,5 +1,5 @@@
  /*
 - * Copyright (C) 2005 - 2014 Emulex
 + * Copyright (C) 2005 - 2015 Emulex
   * All rights reserved.
   *
   * This program is free software; you can redistribute it and/or
@@@ -123,6 -123,7 +123,6 @@@ static const struct be_ethtool_stat et_
        {DRVSTAT_INFO(dma_map_errors)},
        /* Number of packets dropped due to random early drop function */
        {DRVSTAT_INFO(eth_red_drops)},
 -      {DRVSTAT_INFO(be_on_die_temperature)},
        {DRVSTAT_INFO(rx_roce_bytes_lsd)},
        {DRVSTAT_INFO(rx_roce_bytes_msd)},
        {DRVSTAT_INFO(rx_roce_frames)},
@@@ -263,8 -264,8 +263,8 @@@ static int lancer_cmd_read_file(struct 
        int status = 0;
  
        read_cmd.size = LANCER_READ_FILE_CHUNK;
-       read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size,
-                                          &read_cmd.dma);
+       read_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, read_cmd.size,
+                                         &read_cmd.dma, GFP_ATOMIC);
  
        if (!read_cmd.va) {
                dev_err(&adapter->pdev->dev,
                        break;
                }
        }
-       pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va,
-                           read_cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, read_cmd.size, read_cmd.va,
+                         read_cmd.dma);
  
        return status;
  }
@@@ -367,14 -368,6 +367,14 @@@ static int be_set_coalesce(struct net_d
                aic++;
        }
  
 +      /* For Skyhawk, the EQD setting happens via EQ_DB when AIC is enabled.
 +       * When AIC is disabled, persistently force set EQD value via the
 +       * FW cmd, so that we don't have to calculate the delay multiplier
 +       * encode value each time EQ_DB is rung
 +       */
 +      if (!et->use_adaptive_rx_coalesce && skyhawk_chip(adapter))
 +              be_eqd_update(adapter, true);
 +
        return 0;
  }
  
@@@ -825,8 -818,9 +825,9 @@@ static int be_test_ddr_dma(struct be_ad
        };
  
        ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
-       ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
-                                          &ddrdma_cmd.dma, GFP_KERNEL);
+       ddrdma_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+                                           ddrdma_cmd.size, &ddrdma_cmd.dma,
+                                           GFP_KERNEL);
        if (!ddrdma_cmd.va)
                return -ENOMEM;
  
@@@ -948,8 -942,9 +949,9 @@@ static int be_read_eeprom(struct net_de
  
        memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
        eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
-       eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
-                                          &eeprom_cmd.dma, GFP_KERNEL);
+       eeprom_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+                                           eeprom_cmd.size, &eeprom_cmd.dma,
+                                           GFP_KERNEL);
  
        if (!eeprom_cmd.va)
                return -ENOMEM;
index 1365a56f78df7b9ca692091268f50799370e0577,e43cc8a73ea7e85a927443c077c18ce6c673751a..c0f34845cf5912630a56f753d0c5e103d8f14daf
@@@ -1,5 -1,5 +1,5 @@@
  /*
 - * Copyright (C) 2005 - 2014 Emulex
 + * Copyright (C) 2005 - 2015 Emulex
   * All rights reserved.
   *
   * This program is free software; you can redistribute it and/or
@@@ -179,7 -179,7 +179,7 @@@ static void be_intr_set(struct be_adapt
        if (lancer_chip(adapter))
                return;
  
 -      if (adapter->eeh_error)
 +      if (be_check_error(adapter, BE_ERROR_EEH))
                return;
  
        status = be_cmd_intr_set(adapter, enable);
@@@ -191,9 -191,6 +191,9 @@@ static void be_rxq_notify(struct be_ada
  {
        u32 val = 0;
  
 +      if (be_check_error(adapter, BE_ERROR_HW))
 +              return;
 +
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;
  
@@@ -206,9 -203,6 +206,9 @@@ static void be_txq_notify(struct be_ada
  {
        u32 val = 0;
  
 +      if (be_check_error(adapter, BE_ERROR_HW))
 +              return;
 +
        val |= txo->q.id & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
  
  }
  
  static void be_eq_notify(struct be_adapter *adapter, u16 qid,
 -                       bool arm, bool clear_int, u16 num_popped)
 +                       bool arm, bool clear_int, u16 num_popped,
 +                       u32 eq_delay_mult_enc)
  {
        u32 val = 0;
  
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
  
 -      if (adapter->eeh_error)
 +      if (be_check_error(adapter, BE_ERROR_HW))
                return;
  
        if (arm)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
 +      val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
  }
  
@@@ -246,7 -238,7 +246,7 @@@ void be_cq_notify(struct be_adapter *ad
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);
  
 -      if (adapter->eeh_error)
 +      if (be_check_error(adapter, BE_ERROR_HW))
                return;
  
        if (arm)
@@@ -670,8 -662,6 +670,8 @@@ void be_link_status_update(struct be_ad
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
 +
 +      netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
  }
  
  static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
@@@ -820,8 -810,6 +820,8 @@@ static void wrb_fill_hdr(struct be_adap
  
        SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
        SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
 +      SET_TX_WRB_HDR_BITS(mgmt, hdr,
 +                          BE_WRB_F_GET(wrb_params->features, OS2BMC));
  }
  
  static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
@@@ -1158,130 -1146,6 +1158,130 @@@ static void be_xmit_flush(struct be_ada
        txo->pend_wrb_cnt = 0;
  }
  
 +/* OS2BMC related */
 +
 +#define DHCP_CLIENT_PORT      68
 +#define DHCP_SERVER_PORT      67
 +#define NET_BIOS_PORT1                137
 +#define NET_BIOS_PORT2                138
 +#define DHCPV6_RAS_PORT               547
 +
 +#define is_mc_allowed_on_bmc(adapter, eh)     \
 +      (!is_multicast_filt_enabled(adapter) && \
 +       is_multicast_ether_addr(eh->h_dest) && \
 +       !is_broadcast_ether_addr(eh->h_dest))
 +
 +#define is_bc_allowed_on_bmc(adapter, eh)     \
 +      (!is_broadcast_filt_enabled(adapter) && \
 +       is_broadcast_ether_addr(eh->h_dest))
 +
 +#define is_arp_allowed_on_bmc(adapter, skb)   \
 +      (is_arp(skb) && is_arp_filt_enabled(adapter))
 +
 +#define is_broadcast_packet(eh, adapter)      \
 +              (is_multicast_ether_addr(eh->h_dest) && \
 +              !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))
 +
 +#define is_arp(skb)   (skb->protocol == htons(ETH_P_ARP))
 +
 +#define is_arp_filt_enabled(adapter)  \
 +              (adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))
 +
 +#define is_dhcp_client_filt_enabled(adapter)  \
 +              (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)
 +
 +#define is_dhcp_srvr_filt_enabled(adapter)    \
 +              (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)
 +
 +#define is_nbios_filt_enabled(adapter)        \
 +              (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)
 +
 +#define is_ipv6_na_filt_enabled(adapter)      \
 +              (adapter->bmc_filt_mask &       \
 +                      BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)
 +
 +#define is_ipv6_ra_filt_enabled(adapter)      \
 +              (adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)
 +
 +#define is_ipv6_ras_filt_enabled(adapter)     \
 +              (adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)
 +
 +#define is_broadcast_filt_enabled(adapter)    \
 +              (adapter->bmc_filt_mask & BMC_FILT_BROADCAST)
 +
 +#define is_multicast_filt_enabled(adapter)    \
 +              (adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
 +
 +static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
 +                             struct sk_buff **skb)
 +{
 +      struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
 +      bool os2bmc = false;
 +
 +      if (!be_is_os2bmc_enabled(adapter))
 +              goto done;
 +
 +      if (!is_multicast_ether_addr(eh->h_dest))
 +              goto done;
 +
 +      if (is_mc_allowed_on_bmc(adapter, eh) ||
 +          is_bc_allowed_on_bmc(adapter, eh) ||
 +          is_arp_allowed_on_bmc(adapter, (*skb))) {
 +              os2bmc = true;
 +              goto done;
 +      }
 +
 +      if ((*skb)->protocol == htons(ETH_P_IPV6)) {
 +              struct ipv6hdr *hdr = ipv6_hdr((*skb));
 +              u8 nexthdr = hdr->nexthdr;
 +
 +              if (nexthdr == IPPROTO_ICMPV6) {
 +                      struct icmp6hdr *icmp6 = icmp6_hdr((*skb));
 +
 +                      switch (icmp6->icmp6_type) {
 +                      case NDISC_ROUTER_ADVERTISEMENT:
 +                              os2bmc = is_ipv6_ra_filt_enabled(adapter);
 +                              goto done;
 +                      case NDISC_NEIGHBOUR_ADVERTISEMENT:
 +                              os2bmc = is_ipv6_na_filt_enabled(adapter);
 +                              goto done;
 +                      default:
 +                              break;
 +                      }
 +              }
 +      }
 +
 +      if (is_udp_pkt((*skb))) {
 +              struct udphdr *udp = udp_hdr((*skb));
 +
 +              switch (udp->dest) {
 +              case DHCP_CLIENT_PORT:
 +                      os2bmc = is_dhcp_client_filt_enabled(adapter);
 +                      goto done;
 +              case DHCP_SERVER_PORT:
 +                      os2bmc = is_dhcp_srvr_filt_enabled(adapter);
 +                      goto done;
 +              case NET_BIOS_PORT1:
 +              case NET_BIOS_PORT2:
 +                      os2bmc = is_nbios_filt_enabled(adapter);
 +                      goto done;
 +              case DHCPV6_RAS_PORT:
 +                      os2bmc = is_ipv6_ras_filt_enabled(adapter);
 +                      goto done;
 +              default:
 +                      break;
 +              }
 +      }
 +done:
 +      /* For packets over a vlan, which are destined
 +       * to BMC, asic expects the vlan to be inline in the packet.
 +       */
 +      if (os2bmc)
 +              *skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);
 +
 +      return os2bmc;
 +}
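
A note on the UDP branch of the new be_send_pkt_to_bmc() decision above: a copy of the egress frame is queued to the BMC only when the bit matching its destination port is set in the adapter's bmc_filt_mask. The sketch below models just that branch with made-up BMC_FILT_* bit positions and the standard well-known port numbers; it is illustrative only, not the driver's actual definitions.

/* Illustrative sketch: mirrors the UDP-port classification above with
 * assumed filter-mask bit values; the real BMC_FILT_* definitions live in
 * the be2net headers and may differ.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FILT_BROADCAST_DHCP_CLIENT  (1u << 0)   /* assumed bit positions */
#define FILT_BROADCAST_DHCP_SERVER  (1u << 1)
#define FILT_BROADCAST_NET_BIOS     (1u << 2)
#define FILT_MULTICAST_IPV6_RAS     (1u << 3)

static bool udp_pkt_goes_to_bmc(uint32_t filt_mask, uint16_t dst_port)
{
	switch (dst_port) {
	case 68:                /* DHCP client */
		return filt_mask & FILT_BROADCAST_DHCP_CLIENT;
	case 67:                /* DHCP server */
		return filt_mask & FILT_BROADCAST_DHCP_SERVER;
	case 137:
	case 138:               /* NetBIOS */
		return filt_mask & FILT_BROADCAST_NET_BIOS;
	case 547:               /* DHCPv6 relay-agents-and-servers */
		return filt_mask & FILT_MULTICAST_IPV6_RAS;
	default:
		return false;
	}
}

int main(void)
{
	uint32_t mask = FILT_BROADCAST_DHCP_CLIENT | FILT_MULTICAST_IPV6_RAS;

	printf("dst 68  -> %d\n", udp_pkt_goes_to_bmc(mask, 68));   /* 1 */
	printf("dst 137 -> %d\n", udp_pkt_goes_to_bmc(mask, 137));  /* 0 */
	return 0;
}
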
 +
  static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
  {
        struct be_adapter *adapter = netdev_priv(netdev);
                goto drop;
        }
  
 +      /* if os2bmc is enabled and if the pkt is destined to bmc,
 +       * enqueue the pkt a 2nd time with mgmt bit set.
 +       */
 +      if (be_send_pkt_to_bmc(adapter, &skb)) {
 +              BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
 +              wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
 +              if (unlikely(!wrb_cnt))
 +                      goto drop;
 +              else
 +                      skb_get(skb);
 +      }
 +
        if (be_is_txq_full(txo)) {
                netif_stop_subqueue(netdev, q_idx);
                tx_stats(txo)->tx_stops++;
@@@ -1413,8 -1265,7 +1413,8 @@@ static int be_vid_config(struct be_adap
        if (status) {
                dev_err(dev, "Setting HW VLAN filtering failed\n");
                /* Set to VLAN promisc mode as setting VLAN filter failed */
 -              if (addl_status(status) ==
 +              if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
 +                  addl_status(status) ==
                                MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
                        return be_set_vlan_promisc(adapter);
        } else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
@@@ -1615,7 -1466,6 +1615,7 @@@ static int be_get_vf_config(struct net_
        vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
        vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
 +      vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
  
        return 0;
  }
@@@ -1628,7 -1478,7 +1628,7 @@@ static int be_set_vf_tvt(struct be_adap
        int status;
  
        /* Enable Transparent VLAN Tagging */
 -      status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0);
 +      status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
        if (status)
                return status;
  
@@@ -1657,7 -1507,7 +1657,7 @@@ static int be_clear_vf_tvt(struct be_ad
  
        /* Reset Transparent VLAN Tagging. */
        status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
 -                                     vf_cfg->if_handle, 0);
 +                                     vf_cfg->if_handle, 0, 0);
        if (status)
                return status;
  
@@@ -1792,39 -1642,6 +1792,39 @@@ static int be_set_vf_link_state(struct 
        return 0;
  }
  
 +static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
 +{
 +      struct be_adapter *adapter = netdev_priv(netdev);
 +      struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
 +      u8 spoofchk;
 +      int status;
 +
 +      if (!sriov_enabled(adapter))
 +              return -EPERM;
 +
 +      if (vf >= adapter->num_vfs)
 +              return -EINVAL;
 +
 +      if (BEx_chip(adapter))
 +              return -EOPNOTSUPP;
 +
 +      if (enable == vf_cfg->spoofchk)
 +              return 0;
 +
 +      spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
 +
 +      status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
 +                                     0, spoofchk);
 +      if (status) {
 +              dev_err(&adapter->pdev->dev,
 +                      "Spoofchk change on VF %d failed: %#x\n", vf, status);
 +              return be_cmd_status(status);
 +      }
 +
 +      vf_cfg->spoofchk = enable;
 +      return 0;
 +}
 +
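
The new ndo_set_vf_spoofchk handler above follows the usual validate-then-commit order: reject the request before touching hardware, skip the firmware call when nothing would change, and cache the new setting only after the command succeeded. A standalone sketch of that ordering, with the firmware call stubbed out and standard errno values as returned to the caller:

/* Minimal sketch of the guard ordering used by the spoofchk hook above;
 * the hardware/firmware interaction is stubbed out.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct vf_state { bool spoofchk; };

static int fw_set_spoofchk(int vf, bool enable)
{
	(void)vf; (void)enable;
	return 0;                       /* stub: pretend the FW accepted it */
}

static int set_vf_spoofchk(struct vf_state *vf_cfg, int vf, int num_vfs,
			   bool sriov_on, bool hw_supports, bool enable)
{
	if (!sriov_on)
		return -EPERM;          /* SR-IOV not enabled at all */
	if (vf >= num_vfs)
		return -EINVAL;         /* bad VF index */
	if (!hw_supports)
		return -EOPNOTSUPP;     /* e.g. BEx chips above */
	if (enable == vf_cfg[vf].spoofchk)
		return 0;               /* nothing to change */

	if (fw_set_spoofchk(vf, enable))
		return -EIO;

	vf_cfg[vf].spoofchk = enable;   /* cache only after FW accepted it */
	return 0;
}

int main(void)
{
	struct vf_state cfg[2] = { { false }, { false } };

	printf("%d\n", set_vf_spoofchk(cfg, 0, 2, true, true, true));  /* 0 */
	printf("%d\n", set_vf_spoofchk(cfg, 5, 2, true, true, true));  /* -EINVAL */
	return 0;
}
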
  static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
                          ulong now)
  {
        aic->jiffies = now;
  }
  
 -static void be_eqd_update(struct be_adapter *adapter)
 +static int be_get_new_eqd(struct be_eq_obj *eqo)
  {
 -      struct be_set_eqd set_eqd[MAX_EVT_QS];
 -      int eqd, i, num = 0, start;
 +      struct be_adapter *adapter = eqo->adapter;
 +      int eqd, start;
        struct be_aic_obj *aic;
 -      struct be_eq_obj *eqo;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
 -      u64 rx_pkts, tx_pkts;
 +      u64 rx_pkts = 0, tx_pkts = 0;
        ulong now;
        u32 pps, delta;
 +      int i;
  
 -      for_all_evt_queues(adapter, eqo, i) {
 -              aic = &adapter->aic_obj[eqo->idx];
 -              if (!aic->enable) {
 -                      if (aic->jiffies)
 -                              aic->jiffies = 0;
 -                      eqd = aic->et_eqd;
 -                      goto modify_eqd;
 -              }
 +      aic = &adapter->aic_obj[eqo->idx];
 +      if (!aic->enable) {
 +              if (aic->jiffies)
 +                      aic->jiffies = 0;
 +              eqd = aic->et_eqd;
 +              return eqd;
 +      }
  
 -              rxo = &adapter->rx_obj[eqo->idx];
 +      for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
                do {
                        start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
 -                      rx_pkts = rxo->stats.rx_pkts;
 +                      rx_pkts += rxo->stats.rx_pkts;
                } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
 +      }
  
 -              txo = &adapter->tx_obj[eqo->idx];
 +      for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
                do {
                        start = u64_stats_fetch_begin_irq(&txo->stats.sync);
 -                      tx_pkts = txo->stats.tx_reqs;
 +                      tx_pkts += txo->stats.tx_reqs;
                } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
 +      }
  
 -              /* Skip, if wrapped around or first calculation */
 -              now = jiffies;
 -              if (!aic->jiffies || time_before(now, aic->jiffies) ||
 -                  rx_pkts < aic->rx_pkts_prev ||
 -                  tx_pkts < aic->tx_reqs_prev) {
 -                      be_aic_update(aic, rx_pkts, tx_pkts, now);
 -                      continue;
 -              }
 +      /* Skip, if wrapped around or first calculation */
 +      now = jiffies;
 +      if (!aic->jiffies || time_before(now, aic->jiffies) ||
 +          rx_pkts < aic->rx_pkts_prev ||
 +          tx_pkts < aic->tx_reqs_prev) {
 +              be_aic_update(aic, rx_pkts, tx_pkts, now);
 +              return aic->prev_eqd;
 +      }
  
 -              delta = jiffies_to_msecs(now - aic->jiffies);
 -              pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
 -                      (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
 -              eqd = (pps / 15000) << 2;
 +      delta = jiffies_to_msecs(now - aic->jiffies);
 +      if (delta == 0)
 +              return aic->prev_eqd;
  
 -              if (eqd < 8)
 -                      eqd = 0;
 -              eqd = min_t(u32, eqd, aic->max_eqd);
 -              eqd = max_t(u32, eqd, aic->min_eqd);
 +      pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
 +              (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
 +      eqd = (pps / 15000) << 2;
  
 -              be_aic_update(aic, rx_pkts, tx_pkts, now);
 -modify_eqd:
 -              if (eqd != aic->prev_eqd) {
 +      if (eqd < 8)
 +              eqd = 0;
 +      eqd = min_t(u32, eqd, aic->max_eqd);
 +      eqd = max_t(u32, eqd, aic->min_eqd);
 +
 +      be_aic_update(aic, rx_pkts, tx_pkts, now);
 +
 +      return eqd;
 +}
 +
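
be_get_new_eqd() above now sums packets across every RX and TX queue attached to the event queue, converts the delta into a packets-per-second estimate and derives the new delay from that rate. A standalone model of the arithmetic, with example clamp values (the real min/max limits come from the adapter's AIC object):

/* Standalone sketch of the pps -> EQ-delay calculation above. */
#include <stdint.h>
#include <stdio.h>

static uint32_t new_eqd(uint64_t rx_pkts, uint64_t tx_pkts, uint32_t delta_ms,
			uint32_t min_eqd, uint32_t max_eqd)
{
	uint32_t pps, eqd;

	if (!delta_ms)
		return 0;

	pps = (uint32_t)(rx_pkts * 1000 / delta_ms) +
	      (uint32_t)(tx_pkts * 1000 / delta_ms);
	eqd = (pps / 15000) << 2;       /* ~4 delay units per 15k pkts/sec */

	if (eqd < 8)
		eqd = 0;                /* low rate: no interrupt delay */
	if (eqd > max_eqd)
		eqd = max_eqd;
	if (eqd < min_eqd)
		eqd = min_eqd;
	return eqd;
}

int main(void)
{
	/* 300k packets over 1000 ms -> 300k pps -> (300000/15000) << 2 = 80 */
	printf("eqd = %u\n", new_eqd(150000, 150000, 1000, 0, 96));
	return 0;
}
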
 +/* For Skyhawk-R only */
 +static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
 +{
 +      struct be_adapter *adapter = eqo->adapter;
 +      struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
 +      ulong now = jiffies;
 +      int eqd;
 +      u32 mult_enc;
 +
 +      if (!aic->enable)
 +              return 0;
 +
 +      if (time_before_eq(now, aic->jiffies) ||
 +          jiffies_to_msecs(now - aic->jiffies) < 1)
 +              eqd = aic->prev_eqd;
 +      else
 +              eqd = be_get_new_eqd(eqo);
 +
 +      if (eqd > 100)
 +              mult_enc = R2I_DLY_ENC_1;
 +      else if (eqd > 60)
 +              mult_enc = R2I_DLY_ENC_2;
 +      else if (eqd > 20)
 +              mult_enc = R2I_DLY_ENC_3;
 +      else
 +              mult_enc = R2I_DLY_ENC_0;
 +
 +      aic->prev_eqd = eqd;
 +
 +      return mult_enc;
 +}
 +
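
For Skyhawk the computed delay is not programmed directly; it is bucketed into one of four delay-multiplier encodings written through the EQ doorbell. A sketch of the bucketing, with plain integers standing in for the R2I_DLY_ENC_* hardware values:

#include <stdio.h>

/* 0..3 below are stand-ins for R2I_DLY_ENC_0..R2I_DLY_ENC_3 */
static unsigned int dly_mult_enc(int eqd)
{
	if (eqd > 100)
		return 1;       /* R2I_DLY_ENC_1 */
	if (eqd > 60)
		return 2;       /* R2I_DLY_ENC_2 */
	if (eqd > 20)
		return 3;       /* R2I_DLY_ENC_3 */
	return 0;               /* R2I_DLY_ENC_0 */
}

int main(void)
{
	printf("%u %u %u\n", dly_mult_enc(10), dly_mult_enc(80), dly_mult_enc(120));
	return 0;
}
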
 +void be_eqd_update(struct be_adapter *adapter, bool force_update)
 +{
 +      struct be_set_eqd set_eqd[MAX_EVT_QS];
 +      struct be_aic_obj *aic;
 +      struct be_eq_obj *eqo;
 +      int i, num = 0, eqd;
 +
 +      for_all_evt_queues(adapter, eqo, i) {
 +              aic = &adapter->aic_obj[eqo->idx];
 +              eqd = be_get_new_eqd(eqo);
 +              if (force_update || eqd != aic->prev_eqd) {
                        set_eqd[num].delay_multiplier = (eqd * 65)/100;
                        set_eqd[num].eq_id = eqo->q.id;
                        aic->prev_eqd = eqd;
@@@ -2444,7 -2212,7 +2444,7 @@@ static void be_eq_clean(struct be_eq_ob
  {
        int num = events_get(eqo);
  
 -      be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
 +      be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
  }
  
  static void be_rx_cq_clean(struct be_rx_obj *rxo)
                        if (lancer_chip(adapter))
                                break;
  
 -                      if (flush_wait++ > 10 || be_hw_error(adapter)) {
 +                      if (flush_wait++ > 50 ||
 +                          be_check_error(adapter,
 +                                         BE_ERROR_HW)) {
                                dev_warn(&adapter->pdev->dev,
                                         "did not receive flush compl\n");
                                break;
@@@ -2531,8 -2297,7 +2531,8 @@@ static void be_tx_compl_clean(struct be
                                pending_txqs--;
                }
  
 -              if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
 +              if (pending_txqs == 0 || ++timeo > 10 ||
 +                  be_check_error(adapter, BE_ERROR_HW))
                        break;
  
                mdelay(1);
@@@ -2808,7 -2573,7 +2808,7 @@@ static irqreturn_t be_intx(int irq, voi
                if (num_evts)
                        eqo->spurious_intr = 0;
        }
 -      be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
 +      be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
  
        /* Return IRQ_HANDLED only for the first spurious intr
         * after a valid intr to stop the kernel from branding
@@@ -2824,7 -2589,7 +2824,7 @@@ static irqreturn_t be_msix(int irq, voi
  {
        struct be_eq_obj *eqo = dev;
  
 -      be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
 +      be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
        napi_schedule(&eqo->napi);
        return IRQ_HANDLED;
  }
@@@ -3073,7 -2838,6 +3073,7 @@@ int be_poll(struct napi_struct *napi, i
        int max_work = 0, work, i, num_evts;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
 +      u32 mult_enc = 0;
  
        num_evts = events_get(eqo);
  
  
        if (max_work < budget) {
                napi_complete(napi);
 -              be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
 +
 +              /* Skyhawk EQ_DB has a provision to set the rearm to interrupt
 +               * delay via a delay multiplier encoding value
 +               */
 +              if (skyhawk_chip(adapter))
 +                      mult_enc = be_get_eq_delay_mult_enc(eqo);
 +
 +              be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
 +                           mult_enc);
        } else {
                /* As we'll continue in polling mode, count and clear events */
 -              be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
 +              be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
        }
        return max_work;
  }
@@@ -3142,19 -2898,22 +3142,19 @@@ void be_detect_error(struct be_adapter 
        u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
        u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
        u32 i;
 -      bool error_detected = false;
        struct device *dev = &adapter->pdev->dev;
 -      struct net_device *netdev = adapter->netdev;
  
 -      if (be_hw_error(adapter))
 +      if (be_check_error(adapter, BE_ERROR_HW))
                return;
  
        if (lancer_chip(adapter)) {
                sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
                if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
 +                      be_set_error(adapter, BE_ERROR_UE);
                        sliport_err1 = ioread32(adapter->db +
                                                SLIPORT_ERROR1_OFFSET);
                        sliport_err2 = ioread32(adapter->db +
                                                SLIPORT_ERROR2_OFFSET);
 -                      adapter->hw_error = true;
 -                      error_detected = true;
                        /* Do not log error messages if it's a FW reset */
                        if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
                            sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
                 */
  
                if (ue_lo || ue_hi) {
 -                      error_detected = true;
                        dev_err(dev,
                                "Unrecoverable Error detected in the adapter");
                        dev_err(dev, "Please reboot server to recover");
                        if (skyhawk_chip(adapter))
 -                              adapter->hw_error = true;
 +                              be_set_error(adapter, BE_ERROR_UE);
 +
                        for (i = 0; ue_lo; ue_lo >>= 1, i++) {
                                if (ue_lo & 1)
                                        dev_err(dev, "UE: %s bit set\n",
                        }
                }
        }
 -      if (error_detected)
 -              netif_carrier_off(netdev);
  }
  
  static void be_msix_disable(struct be_adapter *adapter)
@@@ -3254,7 -3015,7 +3254,7 @@@ fail
        dev_warn(dev, "MSIx enable failed\n");
  
        /* INTx is not supported in VFs, so fail probe if enable_msix fails */
 -      if (!be_physfn(adapter))
 +      if (be_virtfn(adapter))
                return num_vec;
        return 0;
  }
@@@ -3301,7 -3062,7 +3301,7 @@@ static int be_irq_register(struct be_ad
                if (status == 0)
                        goto done;
                /* INTx is not supported for VF */
 -              if (!be_physfn(adapter))
 +              if (be_virtfn(adapter))
                        return status;
        }
  
@@@ -3468,12 -3229,9 +3468,12 @@@ static int be_rx_qs_create(struct be_ad
  
        memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
  
 -      /* First time posting */
 +      /* Post 1 less than RXQ-len to avoid head being equal to tail,
 +       * which is a queue empty condition
 +       */
        for_all_rx_queues(adapter, rxo, i)
 -              be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
 +              be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
 +
        return 0;
  }
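
The RX_Q_LEN - 1 posting above relies on the usual circular-queue convention: one slot is deliberately left unused so that a head index equal to the tail index can only mean the ring is empty, never full. A minimal standalone model of that convention:

#include <stdbool.h>
#include <stdio.h>

#define LEN 8   /* example ring length */

static bool ring_empty(unsigned int head, unsigned int tail)
{
	return head == tail;
}

static bool ring_full(unsigned int head, unsigned int tail)
{
	return ((head + 1) % LEN) == tail;  /* one slot deliberately unused */
}

int main(void)
{
	printf("empty: %d, full: %d\n", ring_empty(3, 3), ring_full(2, 3));
	return 0;
}
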
  
@@@ -3505,7 -3263,7 +3505,7 @@@ static int be_open(struct net_device *n
        for_all_evt_queues(adapter, eqo, i) {
                napi_enable(&eqo->napi);
                be_enable_busy_poll(eqo);
 -              be_eq_notify(adapter, eqo->q.id, true, true, 0);
 +              be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
        }
        adapter->flags |= BE_FLAGS_NAPI_ENABLED;
  
@@@ -3805,7 -3563,7 +3805,7 @@@ static int be_vfs_if_create(struct be_a
  
        /* If a FW profile exists, then cap_flags are updated */
        cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
 -                  BE_IF_FLAGS_MULTICAST;
 +                  BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
  
        for_all_vfs(adapter, vf_cfg, vf) {
                if (!BE3_chip(adapter)) {
@@@ -3852,7 -3610,6 +3852,7 @@@ static int be_vf_setup(struct be_adapte
        struct device *dev = &adapter->pdev->dev;
        struct be_vf_cfg *vf_cfg;
        int status, old_vfs, vf;
 +      bool spoofchk;
  
        old_vfs = pci_num_vf(adapter->pdev);
  
                if (!old_vfs)
                        be_cmd_config_qos(adapter, 0, 0, vf + 1);
  
 +              status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
 +                                             vf_cfg->if_handle, NULL,
 +                                             &spoofchk);
 +              if (!status)
 +                      vf_cfg->spoofchk = spoofchk;
 +
                if (!old_vfs) {
                        be_cmd_enable_vf(adapter, vf + 1);
                        be_cmd_set_logical_link_config(adapter,
@@@ -3982,9 -3733,8 +3982,9 @@@ static void BEx_get_resources(struct be
         *    *only* if it is RSS-capable.
         */
        if (BE2_chip(adapter) || use_sriov ||  (adapter->port_num > 1) ||
 -          !be_physfn(adapter) || (be_is_mc(adapter) &&
 -          !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
 +          be_virtfn(adapter) ||
 +          (be_is_mc(adapter) &&
 +           !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
                res->max_tx_qs = 1;
        } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
                struct be_resources super_nic_res = {0};
@@@ -4325,7 -4075,7 +4325,7 @@@ static int be_func_init(struct be_adapt
                msleep(100);
  
                /* We can clear all errors when function reset succeeds */
 -              be_clear_all_error(adapter);
 +              be_clear_error(adapter, BE_CLEAR_ALL);
        }
  
        /* Tell FW we're ready to fire cmds */
@@@ -4432,7 -4182,7 +4432,7 @@@ static void be_netpoll(struct net_devic
        int i;
  
        for_all_evt_queues(adapter, eqo, i) {
 -              be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
 +              be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
                napi_schedule(&eqo->napi);
        }
  }
@@@ -4855,8 -4605,8 +4855,8 @@@ static int lancer_fw_download(struct be
  
        flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
                                + LANCER_FW_DOWNLOAD_CHUNK;
-       flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
-                                         &flash_cmd.dma, GFP_KERNEL);
+       flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
+                                          &flash_cmd.dma, GFP_KERNEL);
        if (!flash_cmd.va)
                return -ENOMEM;
  
        return 0;
  }
  
 -#define BE2_UFI               2
 -#define BE3_UFI               3
 -#define BE3R_UFI      10
 -#define SH_UFI                4
 -#define SH_P2_UFI     11
 -
 -static int be_get_ufi_type(struct be_adapter *adapter,
 -                         struct flash_file_hdr_g3 *fhdr)
 +/* Check if the flash image file is compatible with the adapter that
 + * is being flashed.
 + */
 +static bool be_check_ufi_compatibility(struct be_adapter *adapter,
 +                                     struct flash_file_hdr_g3 *fhdr)
  {
        if (!fhdr) {
                dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
         */
        switch (fhdr->build[0]) {
        case BLD_STR_UFI_TYPE_SH:
 -              return (fhdr->asic_type_rev == ASIC_REV_P2) ? SH_P2_UFI :
 -                                                              SH_UFI;
 +              if (!skyhawk_chip(adapter))
 +                      return false;
 +              break;
        case BLD_STR_UFI_TYPE_BE3:
 -              return (fhdr->asic_type_rev == ASIC_REV_B0) ? BE3R_UFI :
 -                                                              BE3_UFI;
 +              if (!BE3_chip(adapter))
 +                      return false;
 +              break;
        case BLD_STR_UFI_TYPE_BE2:
 -              return BE2_UFI;
 -      default:
 -              return -1;
 -      }
 -}
 -
 -/* Check if the flash image file is compatible with the adapter that
 - * is being flashed.
 - * BE3 chips with asic-rev B0 must be flashed only with BE3R_UFI type.
 - * Skyhawk chips with asic-rev P2 must be flashed only with SH_P2_UFI type.
 - */
 -static bool be_check_ufi_compatibility(struct be_adapter *adapter,
 -                                     struct flash_file_hdr_g3 *fhdr)
 -{
 -      int ufi_type = be_get_ufi_type(adapter, fhdr);
 -
 -      switch (ufi_type) {
 -      case SH_P2_UFI:
 -              return skyhawk_chip(adapter);
 -      case SH_UFI:
 -              return (skyhawk_chip(adapter) &&
 -                      adapter->asic_rev < ASIC_REV_P2);
 -      case BE3R_UFI:
 -              return BE3_chip(adapter);
 -      case BE3_UFI:
 -              return (BE3_chip(adapter) && adapter->asic_rev < ASIC_REV_B0);
 -      case BE2_UFI:
 -              return BE2_chip(adapter);
 +              if (!BE2_chip(adapter))
 +                      return false;
 +              break;
        default:
                return false;
        }
 +
 +      return (fhdr->asic_type_rev >= adapter->asic_rev);
  }
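
The rewritten be_check_ufi_compatibility() above collapses the old per-UFI-type table into two rules: the build marker in the image header must name the adapter's chip family, and the image's asic_type_rev must be at least the adapter's asic_rev. A standalone model of that rule, where the family markers are placeholders rather than the actual BLD_STR_UFI_TYPE_* values:

#include <stdbool.h>
#include <stdio.h>

enum chip { CHIP_BE2, CHIP_BE3, CHIP_SKYHAWK };

static bool ufi_compatible(enum chip chip, char family_marker,
			   unsigned char image_rev, unsigned char asic_rev)
{
	switch (family_marker) {        /* placeholder markers */
	case 'S': if (chip != CHIP_SKYHAWK) return false; break;
	case '3': if (chip != CHIP_BE3)     return false; break;
	case '2': if (chip != CHIP_BE2)     return false; break;
	default:  return false;
	}
	return image_rev >= asic_rev;   /* image must target this rev or newer */
}

int main(void)
{
	printf("%d\n", ufi_compatible(CHIP_SKYHAWK, 'S', 0x11, 0x10)); /* 1 */
	printf("%d\n", ufi_compatible(CHIP_BE3, 'S', 0x11, 0x10));     /* 0 */
	return 0;
}
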
  
  static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
        }
  
        flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
-       flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
-                                         GFP_KERNEL);
+       flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
+                                          GFP_KERNEL);
        if (!flash_cmd.va)
                return -ENOMEM;
  
@@@ -5055,7 -4829,7 +5055,7 @@@ static int be_ndo_bridge_setlink(struc
                                               adapter->if_handle,
                                               mode == BRIDGE_MODE_VEPA ?
                                               PORT_FWD_TYPE_VEPA :
 -                                             PORT_FWD_TYPE_VEB);
 +                                             PORT_FWD_TYPE_VEB, 0);
                if (status)
                        goto err;
  
@@@ -5087,8 -4861,7 +5087,8 @@@ static int be_ndo_bridge_getlink(struc
                hsw_mode = PORT_FWD_TYPE_VEB;
        } else {
                status = be_cmd_get_hsw_config(adapter, NULL, 0,
 -                                             adapter->if_handle, &hsw_mode);
 +                                             adapter->if_handle, &hsw_mode,
 +                                             NULL);
                if (status)
                        return 0;
        }
@@@ -5241,7 -5014,6 +5241,7 @@@ static const struct net_device_ops be_n
        .ndo_set_vf_rate        = be_set_vf_tx_rate,
        .ndo_get_vf_config      = be_get_vf_config,
        .ndo_set_vf_link_state  = be_set_vf_link_state,
 +      .ndo_set_vf_spoofchk    = be_set_vf_spoofchk,
  #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = be_netpoll,
  #endif
@@@ -5346,7 -5118,7 +5346,7 @@@ static void be_err_detection_task(struc
  
        be_detect_error(adapter);
  
 -      if (adapter->hw_error) {
 +      if (be_check_error(adapter, BE_ERROR_HW)) {
                be_cleanup(adapter);
  
                /* As of now error recovery support is in Lancer only */
@@@ -5410,9 -5182,7 +5410,9 @@@ static void be_worker(struct work_struc
                        be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
        }
  
 -      be_eqd_update(adapter);
 +      /* EQ-delay update for Skyhawk is done while notifying EQ */
 +      if (!skyhawk_chip(adapter))
 +              be_eqd_update(adapter, false);
  
        if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
                be_log_sfp_info(adapter);
@@@ -5432,7 -5202,7 +5432,7 @@@ static void be_unmap_pci_bars(struct be
  
  static int db_bar(struct be_adapter *adapter)
  {
 -      if (lancer_chip(adapter) || !be_physfn(adapter))
 +      if (lancer_chip(adapter) || be_virtfn(adapter))
                return 0;
        else
                return 4;
@@@ -5521,16 -5291,15 +5521,15 @@@ static int be_drv_init(struct be_adapte
        int status = 0;
  
        mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
-       mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
-                                               &mbox_mem_alloc->dma,
-                                               GFP_KERNEL);
+       mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
+                                                &mbox_mem_alloc->dma,
+                                                GFP_KERNEL);
        if (!mbox_mem_alloc->va)
                return -ENOMEM;
  
        mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
        mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
        mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
-       memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
  
        rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
        rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
@@@ -5612,30 -5381,6 +5611,30 @@@ static void be_remove(struct pci_dev *p
        free_netdev(adapter->netdev);
  }
  
 +static ssize_t be_hwmon_show_temp(struct device *dev,
 +                                struct device_attribute *dev_attr,
 +                                char *buf)
 +{
 +      struct be_adapter *adapter = dev_get_drvdata(dev);
 +
 +      /* Unit: millidegree Celsius */
 +      if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
 +              return -EIO;
 +      else
 +              return sprintf(buf, "%u\n",
 +                             adapter->hwmon_info.be_on_die_temp * 1000);
 +}
 +
 +static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
 +                        be_hwmon_show_temp, NULL, 1);
 +
 +static struct attribute *be_hwmon_attrs[] = {
 +      &sensor_dev_attr_temp1_input.dev_attr.attr,
 +      NULL
 +};
 +
 +ATTRIBUTE_GROUPS(be_hwmon);
 +
  static char *mc_name(struct be_adapter *adapter)
  {
        char *str = ""; /* default */
@@@ -5755,16 -5500,6 +5754,16 @@@ static int be_probe(struct pci_dev *pde
  
        be_schedule_err_detection(adapter);
  
 +      /* On Die temperature not supported for VF. */
 +      if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
 +              adapter->hwmon_info.hwmon_dev =
 +                      devm_hwmon_device_register_with_groups(&pdev->dev,
 +                                                             DRV_NAME,
 +                                                             adapter,
 +                                                             be_hwmon_groups);
 +              adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
 +      }
 +
        dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
                 func_name(adapter), mc_name(adapter), adapter->port_name);
  
@@@ -5857,8 -5592,8 +5856,8 @@@ static pci_ers_result_t be_eeh_err_dete
  
        dev_err(&adapter->pdev->dev, "EEH error detected\n");
  
 -      if (!adapter->eeh_error) {
 -              adapter->eeh_error = true;
 +      if (!be_check_error(adapter, BE_ERROR_EEH)) {
 +              be_set_error(adapter, BE_ERROR_EEH);
  
                be_cancel_err_detection(adapter);
  
@@@ -5905,7 -5640,7 +5904,7 @@@ static pci_ers_result_t be_eeh_reset(st
                return PCI_ERS_RESULT_DISCONNECT;
  
        pci_cleanup_aer_uncorrect_error_status(pdev);
 -      be_clear_all_error(adapter);
 +      be_clear_error(adapter, BE_CLEAR_ALL);
        return PCI_ERS_RESULT_RECOVERED;
  }
  
index aca9cef50d81a521f21aa74a93ebaebd0a27d793,5d47307121abbe413cd259ff74f9aa2ee68e6c45..ec76c3fa3a041158dcb5c21872afd5dd8352b9aa
@@@ -182,7 -182,6 +182,7 @@@ struct i40e_lump_tracking 
  enum i40e_fd_stat_idx {
        I40E_FD_STAT_ATR,
        I40E_FD_STAT_SB,
 +      I40E_FD_STAT_ATR_TUNNEL,
        I40E_FD_STAT_PF_COUNT
  };
  #define I40E_FD_STAT_PF_IDX(pf_id) ((pf_id) * I40E_FD_STAT_PF_COUNT)
                        (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR)
  #define I40E_FD_SB_STAT_IDX(pf_id)  \
                        (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_SB)
 +#define I40E_FD_ATR_TUNNEL_STAT_IDX(pf_id) \
 +                      (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR_TUNNEL)
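
The macros above carve one block of GLQF_PCNT counters per PF: I40E_FD_STAT_PF_IDX(pf_id) points at the start of the block and the enum selects the slot inside it, so adding I40E_FD_STAT_ATR_TUNNEL simply grows each block by one. A small model of the indexing:

#include <stdio.h>

enum fd_stat { FD_STAT_ATR, FD_STAT_SB, FD_STAT_ATR_TUNNEL, FD_STAT_PF_COUNT };

#define FD_STAT_IDX(pf_id, stat) ((pf_id) * FD_STAT_PF_COUNT + (stat))

int main(void)
{
	/* PF 2: ATR -> 6, SB -> 7, tunnelled ATR -> 8 */
	printf("%d %d %d\n",
	       FD_STAT_IDX(2, FD_STAT_ATR),
	       FD_STAT_IDX(2, FD_STAT_SB),
	       FD_STAT_IDX(2, FD_STAT_ATR_TUNNEL));
	return 0;
}
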
  
  struct i40e_fdir_filter {
        struct hlist_node fdir_node;
@@@ -266,6 -263,8 +266,6 @@@ struct i40e_pf 
  
        struct hlist_head fdir_filter_list;
        u16 fdir_pf_active_filters;
 -      u16 fd_sb_cnt_idx;
 -      u16 fd_atr_cnt_idx;
        unsigned long fd_flush_timestamp;
        u32 fd_flush_cnt;
        u32 fd_add_err;
  #endif
  #define I40E_FLAG_PORT_ID_VALID                (u64)(1 << 28)
  #define I40E_FLAG_DCB_CAPABLE                  (u64)(1 << 29)
+ #define I40E_FLAG_VEB_MODE_ENABLED            BIT_ULL(40)
  
        /* tracks features that get auto disabled by errors */
        u64 auto_disable_flags;
index 0a3e928a2b0014b62ecbbd4d27efd1b7607816dd,5b5bea159bd53c8684d0a69b310e492bc797c8b6..52d7d8b8f1f97542f78147ce7ae5a4d4508015e8
@@@ -39,7 -39,7 +39,7 @@@ static const char i40e_driver_string[] 
  
  #define DRV_VERSION_MAJOR 1
  #define DRV_VERSION_MINOR 3
 -#define DRV_VERSION_BUILD 2
 +#define DRV_VERSION_BUILD 4
  #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
             __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@@ -772,8 -772,9 +772,8 @@@ static void i40e_update_prio_xoff_rx(st
  
        dcb_cfg = &hw->local_dcbx_config;
  
 -      /* See if DCB enabled with PFC TC */
 -      if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
 -          !(dcb_cfg->pfc.pfcenable)) {
 +      /* Collect Link XOFF stats when PFC is disabled */
 +      if (!dcb_cfg->pfc.pfcenable) {
                i40e_update_link_xoff_rx(pf);
                return;
        }
@@@ -1096,18 -1097,12 +1096,18 @@@ static void i40e_update_pf_stats(struc
                           &osd->rx_jabber, &nsd->rx_jabber);
  
        /* FDIR stats */
 -      i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx),
 +      i40e_stat_update32(hw,
 +                         I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
                           pf->stat_offsets_loaded,
                           &osd->fd_atr_match, &nsd->fd_atr_match);
 -      i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx),
 +      i40e_stat_update32(hw,
 +                         I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
                           pf->stat_offsets_loaded,
                           &osd->fd_sb_match, &nsd->fd_sb_match);
 +      i40e_stat_update32(hw,
 +                    I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
 +                    pf->stat_offsets_loaded,
 +                    &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);
  
        val = rd32(hw, I40E_PRTPM_EEE_STAT);
        nsd->tx_lpi_status =
@@@ -4744,8 -4739,7 +4744,8 @@@ static int i40e_up_complete(struct i40e
                pf->fd_add_err = pf->fd_atr_cnt = 0;
                if (pf->fd_tcp_rule > 0) {
                        pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
 -                      dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
 +                      if (I40E_DEBUG_FD & pf->hw.debug_mask)
 +                              dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
                        pf->fd_tcp_rule = 0;
                }
                i40e_fdir_filter_restore(vsi);
@@@ -5434,8 -5428,7 +5434,8 @@@ void i40e_fdir_check_and_reenable(struc
                if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
                    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
                        pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
 -                      dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
 +                      if (I40E_DEBUG_FD & pf->hw.debug_mask)
 +                              dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
                }
        }
        /* Wait for some more space to be available to turn on ATR */
                if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
                    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
                        pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
 -                      dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
 +                      if (I40E_DEBUG_FD & pf->hw.debug_mask)
 +                              dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
                }
        }
  }
@@@ -5477,8 -5469,7 +5477,8 @@@ static void i40e_fdir_flush_and_replay(
  
                if (!(time_after(jiffies, min_flush_time)) &&
                    (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
 -                      dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
 +                      if (I40E_DEBUG_FD & pf->hw.debug_mask)
 +                              dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
                        disable_atr = true;
                }
  
                        if (!disable_atr)
                                pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
                        clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
 -                      dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
 +                      if (I40E_DEBUG_FD & pf->hw.debug_mask)
 +                              dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
                }
        }
  }
@@@ -6107,6 -6097,10 +6107,10 @@@ static int i40e_reconstitute_veb(struc
        if (ret)
                goto end_reconstitute;
  
+       if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
+               veb->bridge_mode = BRIDGE_MODE_VEB;
+       else
+               veb->bridge_mode = BRIDGE_MODE_VEPA;
        i40e_config_bridge_mode(veb);
  
        /* create the remaining VSIs attached to this VEB */
@@@ -7686,8 -7680,12 +7690,8 @@@ static int i40e_sw_init(struct i40e_pf 
            (pf->hw.func_caps.fd_filters_best_effort > 0)) {
                pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
                pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
 -              /* Setup a counter for fd_atr per PF */
 -              pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id);
                if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
                        pf->flags |= I40E_FLAG_FD_SB_ENABLED;
 -                      /* Setup a counter for fd_sb per PF */
 -                      pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
                } else {
                        dev_info(&pf->pdev->dev,
                                 "Flow Director Sideband mode Disabled in MFP mode\n");
@@@ -7777,8 -7775,7 +7781,8 @@@ bool i40e_set_ntuple(struct i40e_pf *pf
                pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
                pf->fdir_pf_active_filters = 0;
                pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
 -              dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
 +              if (I40E_DEBUG_FD & pf->hw.debug_mask)
 +                      dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
                /* if ATR was auto disabled it can be re-enabled. */
                if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
                    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
@@@ -8038,7 -8035,12 +8042,12 @@@ static int i40e_ndo_bridge_setlink(stru
                } else if (mode != veb->bridge_mode) {
                        /* Existing HW bridge but different mode needs reset */
                        veb->bridge_mode = mode;
-                       i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+                       /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
+                       if (mode == BRIDGE_MODE_VEB)
+                               pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+                       else
+                               pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+                       i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
                        break;
                }
        }
@@@ -8350,11 -8352,12 +8359,12 @@@ static int i40e_add_vsi(struct i40e_vs
                ctxt.uplink_seid = vsi->uplink_seid;
                ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
                ctxt.flags = I40E_AQ_VSI_TYPE_PF;
-               if (i40e_is_vsi_uplink_mode_veb(vsi)) {
+               if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
+                   (i40e_is_vsi_uplink_mode_veb(vsi))) {
                        ctxt.info.valid_sections |=
-                               cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+                            cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
                        ctxt.info.switch_id =
-                               cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+                          cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
                }
                i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
                break;
@@@ -8753,6 -8756,14 +8763,14 @@@ struct i40e_vsi *i40e_vsi_setup(struct 
                                         __func__);
                                return NULL;
                        }
+                       /* We come up by default in VEPA mode if SRIOV is not
+                        * already enabled, in which case we can't force VEPA
+                        * mode.
+                        */
+                       if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+                               veb->bridge_mode = BRIDGE_MODE_VEPA;
+                               pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+                       }
                        i40e_config_bridge_mode(veb);
                }
                for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
@@@ -9863,6 -9874,15 +9881,15 @@@ static int i40e_probe(struct pci_dev *p
                goto err_switch_setup;
        }
  
+ #ifdef CONFIG_PCI_IOV
+       /* prep for VF support */
+       if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
+           (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
+           !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
+               if (pci_num_vf(pdev))
+                       pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+       }
+ #endif
        err = i40e_setup_pf_switch(pf, false);
        if (err) {
                dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
index cc82a7ffacb06326f259ff3e8c1f0fc4ec6e9838,9d95042d5a0f5805824d53ecc847ff76a9909444..9a4f2bc70cd2cb5494576f5530a7447502cd3c91
@@@ -165,6 -165,9 +165,6 @@@ int i40e_program_fdir_filter(struct i40
        tx_desc->cmd_type_offset_bsz =
                build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
  
 -      /* set the timestamp */
 -      tx_buf->time_stamp = jiffies;
 -
        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.
         */
@@@ -280,8 -283,7 +280,8 @@@ static int i40e_add_del_fdir_tcpv4(stru
        if (add) {
                pf->fd_tcp_rule++;
                if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
 -                      dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
 +                      if (I40E_DEBUG_FD & pf->hw.debug_mask)
 +                              dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
                        pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
                }
        } else {
                                  (pf->fd_tcp_rule - 1) : 0;
                if (pf->fd_tcp_rule == 0) {
                        pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
 -                      dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
 +                      if (I40E_DEBUG_FD & pf->hw.debug_mask)
 +                              dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
                }
        }
  
@@@ -500,8 -501,7 +500,8 @@@ static void i40e_fd_handle_status(struc
                        if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
                            !(pf->auto_disable_flags &
                                     I40E_FLAG_FD_SB_ENABLED)) {
 -                              dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
 +                              if (I40E_DEBUG_FD & pf->hw.debug_mask)
 +                                      dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
                                pf->auto_disable_flags |=
                                                        I40E_FLAG_FD_SB_ENABLED;
                        }
@@@ -807,6 -807,10 +807,6 @@@ static bool i40e_clean_tx_irq(struct i4
                         tx_ring->vsi->seid,
                         tx_ring->queue_index,
                         tx_ring->next_to_use, i);
 -              dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
 -                       "  time_stamp           <%lx>\n"
 -                       "  jiffies              <%lx>\n",
 -                       tx_ring->tx_bi[i].time_stamp, jiffies);
  
                netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
  
@@@ -1649,6 -1653,9 +1649,6 @@@ static int i40e_clean_rx_irq_ps(struct 
                /* ERR_MASK will only have valid bits if EOP set */
                if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
                        dev_kfree_skb_any(skb);
 -                      /* TODO: shouldn't we increment a counter indicating the
 -                       * drop?
 -                       */
                        continue;
                }
  
                skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
                i40e_receive_skb(rx_ring, skb, vlan_tag);
  
 -              rx_ring->netdev->last_rx = jiffies;
                rx_desc->wb.qword1.status_error_len = 0;
  
        } while (likely(total_rx_packets < budget));
@@@ -1813,6 -1821,7 +1813,6 @@@ static int i40e_clean_rx_irq_1buf(struc
  #endif
                i40e_receive_skb(rx_ring, skb, vlan_tag);
  
 -              rx_ring->netdev->last_rx = jiffies;
                rx_desc->wb.qword1.status_error_len = 0;
        } while (likely(total_rx_packets < budget));
  
@@@ -1916,11 -1925,11 +1916,11 @@@ int i40e_napi_poll(struct napi_struct *
   * i40e_atr - Add a Flow Director ATR filter
   * @tx_ring:  ring to add programming descriptor to
   * @skb:      send buffer
 - * @flags:    send flags
 + * @tx_flags: send tx flags
   * @protocol: wire protocol
   **/
  static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
 -                   u32 flags, __be16 protocol)
 +                   u32 tx_flags, __be16 protocol)
  {
        struct i40e_filter_program_desc *fdir_desc;
        struct i40e_pf *pf = tx_ring->vsi->back;
        if (!tx_ring->atr_sample_rate)
                return;
  
 -      /* snag network header to get L4 type and address */
 -      hdr.network = skb_network_header(skb);
 +      if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
 +              return;
  
 -      /* Currently only IPv4/IPv6 with TCP is supported */
 -      if (protocol == htons(ETH_P_IP)) {
 -              if (hdr.ipv4->protocol != IPPROTO_TCP)
 -                      return;
 +      if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) {
 +              /* snag network header to get L4 type and address */
 +              hdr.network = skb_network_header(skb);
  
 -              /* access ihl as a u8 to avoid unaligned access on ia64 */
 -              hlen = (hdr.network[0] & 0x0F) << 2;
 -      } else if (protocol == htons(ETH_P_IPV6)) {
 -              if (hdr.ipv6->nexthdr != IPPROTO_TCP)
 +              /* Currently only IPv4/IPv6 with TCP is supported
 +               * access ihl as u8 to avoid unaligned access on ia64
 +               */
 +              if (tx_flags & I40E_TX_FLAGS_IPV4)
 +                      hlen = (hdr.network[0] & 0x0F) << 2;
 +              else if (protocol == htons(ETH_P_IPV6))
 +                      hlen = sizeof(struct ipv6hdr);
 +              else
                        return;
 -
 -              hlen = sizeof(struct ipv6hdr);
        } else {
 -              return;
 +              hdr.network = skb_inner_network_header(skb);
 +              hlen = skb_inner_network_header_len(skb);
        }
  
 +      /* Currently only IPv4/IPv6 with TCP is supported
 +       * Note: tx_flags gets modified to reflect inner protocols in
 +       * tx_enable_csum function if encap is enabled.
 +       */
 +      if ((tx_flags & I40E_TX_FLAGS_IPV4) &&
 +          (hdr.ipv4->protocol != IPPROTO_TCP))
 +              return;
 +      else if ((tx_flags & I40E_TX_FLAGS_IPV6) &&
 +               (hdr.ipv6->nexthdr != IPPROTO_TCP))
 +              return;
 +
        th = (struct tcphdr *)(hdr.network + hlen);
  
        /* Due to lack of space, no more new filters can be programmed */
                     I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
  
        dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
 -      dtype_cmd |=
 -              ((u32)pf->fd_atr_cnt_idx << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
 -              I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
 +      if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL))
 +              dtype_cmd |=
 +                      ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
 +                      I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
 +                      I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
 +      else
 +              dtype_cmd |=
 +                      ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
 +                      I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
 +                      I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
  
        fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
        fdir_desc->rsvd = cpu_to_le32(0);
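
With the VXLAN tunnel flag now carried in tx_flags (set by the checksum path further down), the ATR sampler above classifies encapsulated flows on their inner headers and charges them to the tunnel counter index instead of re-parsing the outer packet. A sketch of just the header-length selection, with placeholder flag bits:

#include <stdint.h>
#include <stdio.h>

/* flag bits below are placeholders for the I40E_TX_FLAGS_* values */
#define FLAG_IPV4    0x1u
#define FLAG_IPV6    0x2u
#define FLAG_VXLAN   0x4u

/* Return the L3 header length ATR skips before reading the TCP header, or 0
 * if the flow is not one ATR samples.  first_byte is the first byte of the
 * network header, used for the IPv4 IHL field.
 */
static unsigned int atr_l3_hdrlen(uint32_t tx_flags, uint8_t first_byte,
				  unsigned int inner_hdrlen)
{
	if (!(tx_flags & (FLAG_IPV4 | FLAG_IPV6)))
		return 0;                        /* not IP: skip ATR */
	if (tx_flags & FLAG_VXLAN)
		return inner_hdrlen;             /* classify the inner flow */
	if (tx_flags & FLAG_IPV4)
		return (first_byte & 0x0F) << 2; /* IHL in 32-bit words */
	return 40;                               /* fixed IPv6 header */
}

int main(void)
{
	printf("%u\n", atr_l3_hdrlen(FLAG_IPV4, 0x45, 0));               /* 20 */
	printf("%u\n", atr_l3_hdrlen(FLAG_IPV4 | FLAG_VXLAN, 0x45, 28)); /* 28 */
	return 0;
}
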
   * otherwise returns 0 to indicate the flags have been set properly.
   **/
  #ifdef I40E_FCOE
 -int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
 -                             struct i40e_ring *tx_ring,
 -                             u32 *flags)
 -#else
 -static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
 +inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
                                      struct i40e_ring *tx_ring,
                                      u32 *flags)
 +#else
 +static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
 +                                           struct i40e_ring *tx_ring,
 +                                           u32 *flags)
  #endif
  {
        __be16 protocol = skb->protocol;
   * i40e_tso - set up the tso context descriptor
   * @tx_ring:  ptr to the ring to send
   * @skb:      ptr to the skb we're sending
 - * @tx_flags: the collected send information
 - * @protocol: the send protocol
   * @hdr_len:  ptr to the size of the packet header
   * @cd_tunneling: ptr to context descriptor bits
   *
   * Returns 0 if no TSO can happen, 1 if tso is going, or error
   **/
  static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
 -                  u32 tx_flags, __be16 protocol, u8 *hdr_len,
 -                  u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
 +                  u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
 +                  u32 *cd_tunneling)
  {
        u32 cd_cmd, cd_tso_len, cd_mss;
        struct ipv6hdr *ipv6h;
@@@ -2229,12 -2220,12 +2229,12 @@@ static int i40e_tsyn(struct i40e_ring *
  /**
   * i40e_tx_enable_csum - Enable Tx checksum offloads
   * @skb: send buffer
 - * @tx_flags: Tx flags currently set
 + * @tx_flags: pointer to Tx flags currently set
   * @td_cmd: Tx descriptor command bits to set
   * @td_offset: Tx descriptor header offsets to set
   * @cd_tunneling: ptr to context desc bits
   **/
 -static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
 +static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
                                u32 *td_cmd, u32 *td_offset,
                                struct i40e_ring *tx_ring,
                                u32 *cd_tunneling)
                switch (ip_hdr(skb)->protocol) {
                case IPPROTO_UDP:
                        l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
 +                      *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
                        break;
                default:
                        return;
                this_ipv6_hdr = inner_ipv6_hdr(skb);
                this_tcp_hdrlen = inner_tcp_hdrlen(skb);
  
 -              if (tx_flags & I40E_TX_FLAGS_IPV4) {
 -
 -                      if (tx_flags & I40E_TX_FLAGS_TSO) {
 +              if (*tx_flags & I40E_TX_FLAGS_IPV4) {
 +                      if (*tx_flags & I40E_TX_FLAGS_TSO) {
                                *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
                                ip_hdr(skb)->check = 0;
                        } else {
                                *cd_tunneling |=
                                         I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
                        }
 -              } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
 +              } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
                        *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
 -                      if (tx_flags & I40E_TX_FLAGS_TSO)
 +                      if (*tx_flags & I40E_TX_FLAGS_TSO)
                                ip_hdr(skb)->check = 0;
                }
  
                                        skb_transport_offset(skb)) >> 1) <<
                                   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
                if (this_ip_hdr->version == 6) {
 -                      tx_flags &= ~I40E_TX_FLAGS_IPV4;
 -                      tx_flags |= I40E_TX_FLAGS_IPV6;
 +                      *tx_flags &= ~I40E_TX_FLAGS_IPV4;
 +                      *tx_flags |= I40E_TX_FLAGS_IPV6;
                }
        } else {
                network_hdr_len = skb_network_header_len(skb);
        }
  
        /* Enable IP checksum offloads */
 -      if (tx_flags & I40E_TX_FLAGS_IPV4) {
 +      if (*tx_flags & I40E_TX_FLAGS_IPV4) {
                l4_hdr = this_ip_hdr->protocol;
                /* the stack computes the IP header already, the only time we
                 * need the hardware to recompute it is in the case of TSO.
                 */
 -              if (tx_flags & I40E_TX_FLAGS_TSO) {
 +              if (*tx_flags & I40E_TX_FLAGS_TSO) {
                        *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
                        this_ip_hdr->check = 0;
                } else {
                /* Now set the td_offset for IP header length */
                *td_offset = (network_hdr_len >> 2) <<
                              I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
 -      } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
 +      } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
                l4_hdr = this_ipv6_hdr->nexthdr;
                *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
                /* Now set the td_offset for IP header length */
@@@ -2405,9 -2396,9 +2405,9 @@@ static inline int __i40e_maybe_stop_tx(
   * Returns 0 if stop is not needed
   **/
  #ifdef I40E_FCOE
 -int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 +inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
  #else
 -static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 +static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
  #endif
  {
        if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
   * i40e_chk_linearize - Check if there are more than 8 fragments per packet
   * @skb:      send buffer
   * @tx_flags: collected send information
-  * @hdr_len:  size of the packet header
   *
   * Note: Our HW can't scatter-gather more than 8 fragments to build
   * a packet on the wire and so we need to figure out the cases where we
   * need to linearize the skb.
   **/
- static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
-                              const u8 hdr_len)
+ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
  {
        struct skb_frag_struct *frag;
        bool linearize = false;
        gso_segs = skb_shinfo(skb)->gso_segs;
  
        if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
-               u16 j = 1;
+               u16 j = 0;
  
                if (num_frags < (I40E_MAX_BUFFER_TXD))
                        goto linearize_chk_done;
                        goto linearize_chk_done;
                }
                frag = &skb_shinfo(skb)->frags[0];
-               size = hdr_len;
                /* we might still have more fragments per segment */
                do {
                        size += skb_frag_size(frag);
                        frag++; j++;
+                       if ((size >= skb_shinfo(skb)->gso_size) &&
+                           (j < I40E_MAX_BUFFER_TXD)) {
+                               size = (size % skb_shinfo(skb)->gso_size);
+                               j = (size) ? 1 : 0;
+                       }
                        if (j == I40E_MAX_BUFFER_TXD) {
-                               if (size < skb_shinfo(skb)->gso_size) {
-                                       linearize = true;
-                                       break;
-                               }
-                               j = 1;
-                               size -= skb_shinfo(skb)->gso_size;
-                               if (size)
-                                       j++;
-                               size += hdr_len;
+                               linearize = true;
+                               break;
                        }
                        num_frags--;
                } while (num_frags);
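
The reworked walk above drops the header-length seed and instead restarts the per-segment descriptor count whenever a gso_size worth of fragment data has been consumed; if any single segment would still need I40E_MAX_BUFFER_TXD descriptors, the skb is linearized. A standalone model of the check over a plain array of fragment sizes:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_BUF 8   /* stand-in for I40E_MAX_BUFFER_TXD */

static bool needs_linearize(const unsigned int *frag, size_t nfrags,
			    unsigned int gso_size)
{
	unsigned int size = 0, j = 0;
	size_t i;

	if (nfrags < MAX_BUF)
		return false;

	for (i = 0; i < nfrags; i++) {
		size += frag[i];
		j++;
		if (size >= gso_size && j < MAX_BUF) {
			size %= gso_size;     /* a new segment starts here */
			j = size ? 1 : 0;
		}
		if (j == MAX_BUF)
			return true;          /* one segment needs too many buffers */
	}
	return false;
}

int main(void)
{
	unsigned int tiny[12] = { 100, 100, 100, 100, 100, 100,
				  100, 100, 100, 100, 100, 100 };

	/* 12 fragments of 100 bytes against a 1448-byte segment: the first
	 * segment alone would need more than MAX_BUF descriptors, so linearize.
	 */
	printf("%d\n", needs_linearize(tiny, 12, 1448));
	return 0;
}
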
@@@ -2487,13 -2473,13 +2482,13 @@@ linearize_chk_done
   * @td_offset: offset for checksum or crc
   **/
  #ifdef I40E_FCOE
 -void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 -               struct i40e_tx_buffer *first, u32 tx_flags,
 -               const u8 hdr_len, u32 td_cmd, u32 td_offset)
 -#else
 -static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 +inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                        struct i40e_tx_buffer *first, u32 tx_flags,
                        const u8 hdr_len, u32 td_cmd, u32 td_offset)
 +#else
 +static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 +                             struct i40e_tx_buffer *first, u32 tx_flags,
 +                             const u8 hdr_len, u32 td_cmd, u32 td_offset)
  #endif
  {
        unsigned int data_len = skb->data_len;
                                                 tx_ring->queue_index),
                             first->bytecount);
  
 -      /* set the timestamp */
 -      first->time_stamp = jiffies;
 -
        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
@@@ -2651,11 -2640,11 +2646,11 @@@ dma_error
   * one descriptor.
   **/
  #ifdef I40E_FCOE
 -int i40e_xmit_descriptor_count(struct sk_buff *skb,
 -                             struct i40e_ring *tx_ring)
 -#else
 -static int i40e_xmit_descriptor_count(struct sk_buff *skb,
 +inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
                                      struct i40e_ring *tx_ring)
 +#else
 +static inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
 +                                           struct i40e_ring *tx_ring)
  #endif
  {
        unsigned int f;
@@@ -2717,7 -2706,7 +2712,7 @@@ static netdev_tx_t i40e_xmit_frame_ring
        else if (protocol == htons(ETH_P_IPV6))
                tx_flags |= I40E_TX_FLAGS_IPV6;
  
 -      tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
 +      tso = i40e_tso(tx_ring, skb, &hdr_len,
                       &cd_type_cmd_tso_mss, &cd_tunneling);
  
        if (tso < 0)
        if (tsyn)
                tx_flags |= I40E_TX_FLAGS_TSYN;
  
-       if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+       if (i40e_chk_linearize(skb, tx_flags))
                if (skb_linearize(skb))
                        goto out_drop;
  
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                tx_flags |= I40E_TX_FLAGS_CSUM;
  
 -              i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
 +              i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
                                    tx_ring, &cd_tunneling);
        }
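
The reworked linearize check above (the same change is applied to the i40evf ring further down) drops the hdr_len seeding and instead counts how many fragments fall inside a single gso_size segment, forcing skb_linearize() once I40E_MAX_BUFFER_TXD fragments land in one segment. A minimal standalone sketch of that heuristic, for illustration only (the helper and its name are hypothetical; the real logic is i40e_chk_linearize()):

#include <linux/skbuff.h>

/* Illustrative sketch only; max_bufs corresponds to I40E_MAX_BUFFER_TXD (8). */
static bool needs_linearize(struct sk_buff *skb, unsigned int max_bufs)
{
        struct skb_shared_info *sh = skb_shinfo(skb);
        unsigned int size = 0, frags_in_seg = 0;
        unsigned int f;

        if (sh->nr_frags < max_bufs)
                return false;

        for (f = 0; f < sh->nr_frags; f++) {
                size += skb_frag_size(&sh->frags[f]);
                frags_in_seg++;

                /* crossed a segment boundary: restart the per-segment count */
                if (size >= sh->gso_size && frags_in_seg < max_bufs) {
                        size %= sh->gso_size;
                        frags_in_seg = size ? 1 : 0;
                }

                /* max_bufs frags consumed by one segment: HW cannot chain them */
                if (frags_in_seg == max_bufs)
                        return true;
        }

        return false;
}
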
  
index 4653b6e653c9470da76e9b35be2bd3767da7bf5b,4e9376da051829969de7750c2dc7a66acc5e5f40..23f95cdbdfcc2c20d5913fbab3a2b71a1bb61064
@@@ -980,13 -980,6 +980,13 @@@ static int i40e_pci_sriov_enable(struc
        int pre_existing_vfs = pci_num_vf(pdev);
        int err = 0;
  
 +      if (pf->state & __I40E_TESTING) {
 +              dev_warn(&pdev->dev,
 +                       "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
 +              err = -EPERM;
 +              goto err_out;
 +      }
 +
        dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
        if (pre_existing_vfs && pre_existing_vfs != num_vfs)
                i40e_free_vfs(pf);
@@@ -1025,11 -1018,19 +1025,19 @@@ int i40e_pci_sriov_configure(struct pci
  {
        struct i40e_pf *pf = pci_get_drvdata(pdev);
  
-       if (num_vfs)
+       if (num_vfs) {
+               if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+                       pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+                       i40e_do_reset_safe(pf,
+                                          BIT_ULL(__I40E_PF_RESET_REQUESTED));
+               }
                return i40e_pci_sriov_enable(pdev, num_vfs);
+       }
  
        if (!pci_vfs_assigned(pf->pdev)) {
                i40e_free_vfs(pf);
+               pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+               i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
        } else {
                dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
                return -EINVAL;
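
The i40e_pci_sriov_configure() change above forces VEB bridge mode (with a safe PF reset) whenever VFs are created and drops back out of it once the last VF is freed. For context, the general shape of a PCI .sriov_configure callback is sketched below; this is a generic, hypothetical driver skeleton using only core PCI SR-IOV helpers, not i40e code:

#include <linux/pci.h>

/* hypothetical driver skeleton, shown only to frame the i40e change above */
static int foo_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
        int err;

        if (num_vfs) {
                /* driver-specific VF resources would be set up here */
                err = pci_enable_sriov(pdev, num_vfs);
                return err ? err : num_vfs;
        }

        /* refuse to tear down VFs that a guest still owns */
        if (pci_vfs_assigned(pdev))
                return -EPERM;

        pci_disable_sriov(pdev);
        /* driver-specific VF resources would be freed here */
        return 0;
}
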
index ec7e220757db1e283cb385f58f0b5955f50e2b25,458fbb421090772d0bbc1620277624339e0cd757..f54996f196293d8cf0c1942effe40c2e0e77b77e
@@@ -322,6 -322,10 +322,6 @@@ static bool i40e_clean_tx_irq(struct i4
                         tx_ring->vsi->seid,
                         tx_ring->queue_index,
                         tx_ring->next_to_use, i);
 -              dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
 -                       "  time_stamp           <%lx>\n"
 -                       "  jiffies              <%lx>\n",
 -                       tx_ring->tx_bi[i].time_stamp, jiffies);
  
                netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
  
@@@ -1124,6 -1128,9 +1124,6 @@@ static int i40e_clean_rx_irq_ps(struct 
                /* ERR_MASK will only have valid bits if EOP set */
                if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
                        dev_kfree_skb_any(skb);
 -                      /* TODO: shouldn't we increment a counter indicating the
 -                       * drop?
 -                       */
                        continue;
                }
  
                skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
                i40e_receive_skb(rx_ring, skb, vlan_tag);
  
 -              rx_ring->netdev->last_rx = jiffies;
                rx_desc->wb.qword1.status_error_len = 0;
  
        } while (likely(total_rx_packets < budget));
@@@ -1263,6 -1271,7 +1263,6 @@@ static int i40e_clean_rx_irq_1buf(struc
                         : 0;
                i40e_receive_skb(rx_ring, skb, vlan_tag);
  
 -              rx_ring->netdev->last_rx = jiffies;
                rx_desc->wb.qword1.status_error_len = 0;
        } while (likely(total_rx_packets < budget));
  
@@@ -1343,7 -1352,7 +1343,7 @@@ int i40evf_napi_poll(struct napi_struc
  }
  
  /**
 - * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
 + * i40evf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
   * @skb:     send buffer
   * @tx_ring: ring to send buffer on
   * @flags:   the tx flags to be set
   * Returns an error code to indicate the frame should be dropped upon error, and
   * otherwise returns 0 to indicate the flags have been set properly.
   **/
 -static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
 -                                    struct i40e_ring *tx_ring,
 -                                    u32 *flags)
 +static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb,
 +                                             struct i40e_ring *tx_ring,
 +                                             u32 *flags)
  {
        __be16 protocol = skb->protocol;
        u32  tx_flags = 0;
   * i40e_tso - set up the tso context descriptor
   * @tx_ring:  ptr to the ring to send
   * @skb:      ptr to the skb we're sending
 - * @tx_flags: the collected send information
 - * @protocol: the send protocol
   * @hdr_len:  ptr to the size of the packet header
   * @cd_tunneling: ptr to context descriptor bits
   *
   * Returns 0 if no TSO can happen, 1 if tso is going, or error
   **/
  static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
 -                  u32 tx_flags, __be16 protocol, u8 *hdr_len,
 -                  u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
 +                  u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
 +                  u32 *cd_tunneling)
  {
        u32 cd_cmd, cd_tso_len, cd_mss;
        struct ipv6hdr *ipv6h;
  /**
   * i40e_tx_enable_csum - Enable Tx checksum offloads
   * @skb: send buffer
 - * @tx_flags: Tx flags currently set
 + * @tx_flags: pointer to Tx flags currently set
   * @td_cmd: Tx descriptor command bits to set
   * @td_offset: Tx descriptor header offsets to set
   * @cd_tunneling: ptr to context desc bits
   **/
 -static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
 +static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
                                u32 *td_cmd, u32 *td_offset,
                                struct i40e_ring *tx_ring,
                                u32 *cd_tunneling)
                switch (ip_hdr(skb)->protocol) {
                case IPPROTO_UDP:
                        l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
 +                      *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
                        break;
                default:
                        return;
                this_ipv6_hdr = inner_ipv6_hdr(skb);
                this_tcp_hdrlen = inner_tcp_hdrlen(skb);
  
 -              if (tx_flags & I40E_TX_FLAGS_IPV4) {
 -
 -                      if (tx_flags & I40E_TX_FLAGS_TSO) {
 +              if (*tx_flags & I40E_TX_FLAGS_IPV4) {
 +                      if (*tx_flags & I40E_TX_FLAGS_TSO) {
                                *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
                                ip_hdr(skb)->check = 0;
                        } else {
                                *cd_tunneling |=
                                         I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
                        }
 -              } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
 +              } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
                        *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
 -                      if (tx_flags & I40E_TX_FLAGS_TSO)
 +                      if (*tx_flags & I40E_TX_FLAGS_TSO)
                                ip_hdr(skb)->check = 0;
                }
  
                                        skb_transport_offset(skb)) >> 1) <<
                                   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
                if (this_ip_hdr->version == 6) {
 -                      tx_flags &= ~I40E_TX_FLAGS_IPV4;
 -                      tx_flags |= I40E_TX_FLAGS_IPV6;
 +                      *tx_flags &= ~I40E_TX_FLAGS_IPV4;
 +                      *tx_flags |= I40E_TX_FLAGS_IPV6;
                }
  
  
        }
  
        /* Enable IP checksum offloads */
 -      if (tx_flags & I40E_TX_FLAGS_IPV4) {
 +      if (*tx_flags & I40E_TX_FLAGS_IPV4) {
                l4_hdr = this_ip_hdr->protocol;
                /* the stack computes the IP header already, the only time we
                 * need the hardware to recompute it is in the case of TSO.
                 */
 -              if (tx_flags & I40E_TX_FLAGS_TSO) {
 +              if (*tx_flags & I40E_TX_FLAGS_TSO) {
                        *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
                        this_ip_hdr->check = 0;
                } else {
                /* Now set the td_offset for IP header length */
                *td_offset = (network_hdr_len >> 2) <<
                              I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
 -      } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
 +      } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
                l4_hdr = this_ipv6_hdr->nexthdr;
                *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
                /* Now set the td_offset for IP header length */
@@@ -1608,14 -1619,12 +1608,12 @@@ static void i40e_create_tx_ctx(struct i
   * i40e_chk_linearize - Check if there are more than 8 fragments per packet
   * @skb:      send buffer
   * @tx_flags: collected send information
-  * @hdr_len:  size of the packet header
   *
   * Note: Our HW can't scatter-gather more than 8 fragments to build
   * a packet on the wire and so we need to figure out the cases where we
   * need to linearize the skb.
   **/
- static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
-                              const u8 hdr_len)
+ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
  {
        struct skb_frag_struct *frag;
        bool linearize = false;
        gso_segs = skb_shinfo(skb)->gso_segs;
  
        if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
-               u16 j = 1;
+               u16 j = 0;
  
                if (num_frags < (I40E_MAX_BUFFER_TXD))
                        goto linearize_chk_done;
                        goto linearize_chk_done;
                }
                frag = &skb_shinfo(skb)->frags[0];
-               size = hdr_len;
                /* we might still have more fragments per segment */
                do {
                        size += skb_frag_size(frag);
                        frag++; j++;
+                       if ((size >= skb_shinfo(skb)->gso_size) &&
+                           (j < I40E_MAX_BUFFER_TXD)) {
+                               size = (size % skb_shinfo(skb)->gso_size);
+                               j = (size) ? 1 : 0;
+                       }
                        if (j == I40E_MAX_BUFFER_TXD) {
-                               if (size < skb_shinfo(skb)->gso_size) {
-                                       linearize = true;
-                                       break;
-                               }
-                               j = 1;
-                               size -= skb_shinfo(skb)->gso_size;
-                               if (size)
-                                       j++;
-                               size += hdr_len;
+                               linearize = true;
+                               break;
                        }
                        num_frags--;
                } while (num_frags);
@@@ -1666,44 -1672,7 +1661,44 @@@ linearize_chk_done
  }
  
  /**
 - * i40e_tx_map - Build the Tx descriptor
 + * __i40evf_maybe_stop_tx - 2nd level check for tx stop conditions
 + * @tx_ring: the ring to be checked
 + * @size:    the buffer size we want to ensure is available
 + *
 + * Returns -EBUSY if a stop is needed, else 0
 + **/
 +static inline int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 +{
 +      netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
 +      /* Memory barrier before checking head and tail */
 +      smp_mb();
 +
 +      /* Check again in case another CPU has just made room available. */
 +      if (likely(I40E_DESC_UNUSED(tx_ring) < size))
 +              return -EBUSY;
 +
 +      /* A reprieve! - use start_queue because it doesn't call schedule */
 +      netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
 +      ++tx_ring->tx_stats.restart_queue;
 +      return 0;
 +}
 +
 +/**
 + * i40evf_maybe_stop_tx - 1st level check for tx stop conditions
 + * @tx_ring: the ring to be checked
 + * @size:    the buffer size we want to ensure is available
 + *
 + * Returns 0 if stop is not needed
 + **/
 +static inline int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 +{
 +      if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
 +              return 0;
 +      return __i40evf_maybe_stop_tx(tx_ring, size);
 +}
 +
 +/**
 + * i40evf_tx_map - Build the Tx descriptor
   * @tx_ring:  ring to send buffer on
   * @skb:      send buffer
   * @first:    first buffer info buffer to use
   * @td_cmd:   the command field in the descriptor
   * @td_offset: offset for checksum or crc
   **/
 -static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 -                      struct i40e_tx_buffer *first, u32 tx_flags,
 -                      const u8 hdr_len, u32 td_cmd, u32 td_offset)
 +static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 +                               struct i40e_tx_buffer *first, u32 tx_flags,
 +                               const u8 hdr_len, u32 td_cmd, u32 td_offset)
  {
        unsigned int data_len = skb->data_len;
        unsigned int size = skb_headlen(skb);
                                                 tx_ring->queue_index),
                             first->bytecount);
  
 -      /* set the timestamp */
 -      first->time_stamp = jiffies;
 -
        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
  
        tx_ring->next_to_use = i;
  
 +      i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED);
        /* notify HW of packet */
 -      writel(i, tx_ring->tail);
 +      if (!skb->xmit_more ||
 +          netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
 +                                                 tx_ring->queue_index)))
 +              writel(i, tx_ring->tail);
  
        return;
  
@@@ -1863,7 -1831,44 +1858,7 @@@ dma_error
  }
  
  /**
 - * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
 - * @tx_ring: the ring to be checked
 - * @size:    the size buffer we want to assure is available
 - *
 - * Returns -EBUSY if a stop is needed, else 0
 - **/
 -static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 -{
 -      netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
 -      /* Memory barrier before checking head and tail */
 -      smp_mb();
 -
 -      /* Check again in a case another CPU has just made room available. */
 -      if (likely(I40E_DESC_UNUSED(tx_ring) < size))
 -              return -EBUSY;
 -
 -      /* A reprieve! - use start_queue because it doesn't call schedule */
 -      netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
 -      ++tx_ring->tx_stats.restart_queue;
 -      return 0;
 -}
 -
 -/**
 - * i40e_maybe_stop_tx - 1st level check for tx stop conditions
 - * @tx_ring: the ring to be checked
 - * @size:    the size buffer we want to assure is available
 - *
 - * Returns 0 if stop is not needed
 - **/
 -static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 -{
 -      if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
 -              return 0;
 -      return __i40e_maybe_stop_tx(tx_ring, size);
 -}
 -
 -/**
 - * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
 + * i40evf_xmit_descriptor_count - calculate number of tx descriptors needed
   * @skb:     send buffer
   * @tx_ring: ring to send buffer on
   *
   * there are not enough descriptors available in this ring, since we need at least
   * one descriptor.
   **/
 -static int i40e_xmit_descriptor_count(struct sk_buff *skb,
 -                                    struct i40e_ring *tx_ring)
 +static inline int i40evf_xmit_descriptor_count(struct sk_buff *skb,
 +                                             struct i40e_ring *tx_ring)
  {
        unsigned int f;
        int count = 0;
                count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
  
        count += TXD_USE_COUNT(skb_headlen(skb));
 -      if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
 +      if (i40evf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
                tx_ring->tx_stats.tx_busy++;
                return 0;
        }
@@@ -1913,11 -1918,11 +1908,11 @@@ static netdev_tx_t i40e_xmit_frame_ring
        u32 td_cmd = 0;
        u8 hdr_len = 0;
        int tso;
 -      if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
 +      if (0 == i40evf_xmit_descriptor_count(skb, tx_ring))
                return NETDEV_TX_BUSY;
  
        /* prepare the xmit flags */
 -      if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
 +      if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
                goto out_drop;
  
        /* obtain protocol of skb */
        else if (protocol == htons(ETH_P_IPV6))
                tx_flags |= I40E_TX_FLAGS_IPV6;
  
 -      tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
 +      tso = i40e_tso(tx_ring, skb, &hdr_len,
                       &cd_type_cmd_tso_mss, &cd_tunneling);
  
        if (tso < 0)
        else if (tso)
                tx_flags |= I40E_TX_FLAGS_TSO;
  
-       if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+       if (i40e_chk_linearize(skb, tx_flags))
                if (skb_linearize(skb))
                        goto out_drop;
  
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                tx_flags |= I40E_TX_FLAGS_CSUM;
  
 -              i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
 +              i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
                                    tx_ring, &cd_tunneling);
        }
  
        i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
                           cd_tunneling, cd_l2tag2);
  
 -      i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
 -                  td_cmd, td_offset);
 -
 -      i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
 +      i40evf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
 +                    td_cmd, td_offset);
  
        return NETDEV_TX_OK;
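
The i40evf_tx_map() change above defers the tail (doorbell) write when the stack signals that more packets are queued for the same ring. A reduced, generic illustration of that pattern, not the driver function itself (all names here are hypothetical):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/io.h>

static void example_ring_doorbell(struct net_device *netdev, u16 queue_index,
                                  void __iomem *tail, u32 next_to_use,
                                  struct sk_buff *skb)
{
        struct netdev_queue *txq = netdev_get_tx_queue(netdev, queue_index);

        /* skip the MMIO write while the stack says more frames are coming,
         * unless the queue was just stopped and nothing later will flush it
         */
        if (!skb->xmit_more || netif_xmit_stopped(txq))
                writel(next_to_use, tail);
}
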
  
diff --combined lib/rhashtable.c
index ca66a0e32c8eed1457f8b55291a09fd110f57272,8609378e6505123a3688e0e95a18cdde013e278a..a60a6d335a91a6aa90f019f77062e7be069939fd
@@@ -26,6 -26,7 +26,7 @@@
  #include <linux/random.h>
  #include <linux/rhashtable.h>
  #include <linux/err.h>
+ #include <linux/export.h>
  
  #define HASH_DEFAULT_SIZE     64UL
  #define HASH_MIN_SIZE         4U
@@@ -584,6 -585,7 +585,6 @@@ void *rhashtable_walk_next(struct rhash
        struct bucket_table *tbl = iter->walker->tbl;
        struct rhashtable *ht = iter->ht;
        struct rhash_head *p = iter->p;
 -      void *obj = NULL;
  
        if (p) {
                p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
@@@ -603,7 -605,8 +604,7 @@@ next
                if (!rht_is_a_nulls(p)) {
                        iter->skip++;
                        iter->p = p;
 -                      obj = rht_obj(ht, p);
 -                      goto out;
 +                      return rht_obj(ht, p);
                }
  
                iter->skip = 0;
  
        iter->p = NULL;
  
 -out:
 -
 -      return obj;
 +      return NULL;
  }
  EXPORT_SYMBOL_GPL(rhashtable_walk_next);
  
diff --combined net/bridge/br_fdb.c
index 7896cf143045b9342334af53db29db2f1849747c,659fb96672e41e2e6525323697ca23a41d271fbb..cecb482ed919e1862a15a0142eeb810d08a8ddfc
@@@ -736,12 -736,6 +736,12 @@@ static int fdb_add_entry(struct net_bri
        struct net_bridge_fdb_entry *fdb;
        bool modified = false;
  
 +      /* If the port cannot learn, allow only local and static entries */
 +      if (!(state & NUD_PERMANENT) && !(state & NUD_NOARP) &&
 +          !(source->state == BR_STATE_LEARNING ||
 +            source->state == BR_STATE_FORWARDING))
 +              return -EPERM;
 +
        fdb = fdb_find(head, addr, vid);
        if (fdb == NULL) {
                if (!(flags & NLM_F_CREATE))
@@@ -802,9 -796,11 +802,11 @@@ static int __br_fdb_add(struct ndmsg *n
        int err = 0;
  
        if (ndm->ndm_flags & NTF_USE) {
+               local_bh_disable();
                rcu_read_lock();
                br_fdb_update(p->br, p, addr, vid, true);
                rcu_read_unlock();
+               local_bh_enable();
        } else {
                spin_lock_bh(&p->br->hash_lock);
                err = fdb_add_entry(p, addr, ndm->ndm_state,
diff --combined net/core/dev.c
index 0602e917a3053ac9be3cc7f1f495e3eb7cdeadc5,aa82f9ab6a36d164769bf7c9633fcdfd5971466f..6778a9999d525307d5bd41a1750a6e96a6e22bf3
  #include <linux/if_macvlan.h>
  #include <linux/errqueue.h>
  #include <linux/hrtimer.h>
 +#include <linux/netfilter_ingress.h>
  
  #include "net-sysfs.h"
  
@@@ -469,14 -468,10 +469,14 @@@ EXPORT_SYMBOL(dev_remove_pack)
   */
  void dev_add_offload(struct packet_offload *po)
  {
 -      struct list_head *head = &offload_base;
 +      struct packet_offload *elem;
  
        spin_lock(&offload_lock);
 -      list_add_rcu(&po->list, head);
 +      list_for_each_entry(elem, &offload_base, list) {
 +              if (po->priority < elem->priority)
 +                      break;
 +      }
 +      list_add_rcu(&po->list, elem->list.prev);
        spin_unlock(&offload_lock);
  }
  EXPORT_SYMBOL(dev_add_offload);
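
dev_add_offload() now walks offload_base and inserts the new entry before the first element with a higher priority value, so lower numbers run earlier. A hedged registration sketch follows; the offload instance and its callback body are hypothetical, while struct packet_offload, its priority field and dev_add_offload() are the ones used above:

#include <linux/init.h>
#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>

/* placeholder callback body, for illustration only */
static struct sk_buff *my_gso_segment(struct sk_buff *skb,
                                      netdev_features_t features)
{
        return ERR_PTR(-EINVAL);
}

static struct packet_offload my_packet_offload __read_mostly = {
        .type     = cpu_to_be16(ETH_P_IP),
        .priority = 5,  /* sorts before same-type entries with larger values */
        .callbacks = {
                .gso_segment = my_gso_segment,
        },
};

/* called from some (hypothetical) module or subsystem init path */
static int __init my_offload_init(void)
{
        dev_add_offload(&my_packet_offload);
        return 0;
}
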
@@@ -1635,7 -1630,7 +1635,7 @@@ int call_netdevice_notifiers(unsigned l
  }
  EXPORT_SYMBOL(call_netdevice_notifiers);
  
 -#ifdef CONFIG_NET_CLS_ACT
 +#ifdef CONFIG_NET_INGRESS
  static struct static_key ingress_needed __read_mostly;
  
  void net_inc_ingress_queue(void)
@@@ -1723,15 -1718,8 +1723,8 @@@ EXPORT_SYMBOL_GPL(is_skb_forwardable)
  
  int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
  {
-       if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
-               if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
-                       atomic_long_inc(&dev->rx_dropped);
-                       kfree_skb(skb);
-                       return NET_RX_DROP;
-               }
-       }
-       if (unlikely(!is_skb_forwardable(dev, skb))) {
+       if (skb_orphan_frags(skb, GFP_ATOMIC) ||
+           unlikely(!is_skb_forwardable(dev, skb))) {
                atomic_long_inc(&dev->rx_dropped);
                kfree_skb(skb);
                return NET_RX_DROP;
@@@ -2355,34 -2343,6 +2348,34 @@@ void netif_device_attach(struct net_dev
  }
  EXPORT_SYMBOL(netif_device_attach);
  
 +/*
 + * Returns a Tx hash based on the given packet descriptor, using the given
 + * number of Tx queues as a distribution range.
 + */
 +u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
 +                unsigned int num_tx_queues)
 +{
 +      u32 hash;
 +      u16 qoffset = 0;
 +      u16 qcount = num_tx_queues;
 +
 +      if (skb_rx_queue_recorded(skb)) {
 +              hash = skb_get_rx_queue(skb);
 +              while (unlikely(hash >= num_tx_queues))
 +                      hash -= num_tx_queues;
 +              return hash;
 +      }
 +
 +      if (dev->num_tc) {
 +              u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
 +              qoffset = dev->tc_to_txq[tc].offset;
 +              qcount = dev->tc_to_txq[tc].count;
 +      }
 +
 +      return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
 +}
 +EXPORT_SYMBOL(__skb_tx_hash);
 +
  static void skb_warn_bad_offload(const struct sk_buff *skb)
  {
        static const netdev_features_t null_features = 0;
@@@ -2941,84 -2901,6 +2934,84 @@@ int dev_loopback_xmit(struct sock *sk, 
  }
  EXPORT_SYMBOL(dev_loopback_xmit);
  
 +static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
 +{
 +#ifdef CONFIG_XPS
 +      struct xps_dev_maps *dev_maps;
 +      struct xps_map *map;
 +      int queue_index = -1;
 +
 +      rcu_read_lock();
 +      dev_maps = rcu_dereference(dev->xps_maps);
 +      if (dev_maps) {
 +              map = rcu_dereference(
 +                  dev_maps->cpu_map[skb->sender_cpu - 1]);
 +              if (map) {
 +                      if (map->len == 1)
 +                              queue_index = map->queues[0];
 +                      else
 +                              queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
 +                                                                         map->len)];
 +                      if (unlikely(queue_index >= dev->real_num_tx_queues))
 +                              queue_index = -1;
 +              }
 +      }
 +      rcu_read_unlock();
 +
 +      return queue_index;
 +#else
 +      return -1;
 +#endif
 +}
 +
 +static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
 +{
 +      struct sock *sk = skb->sk;
 +      int queue_index = sk_tx_queue_get(sk);
 +
 +      if (queue_index < 0 || skb->ooo_okay ||
 +          queue_index >= dev->real_num_tx_queues) {
 +              int new_index = get_xps_queue(dev, skb);
 +              if (new_index < 0)
 +                      new_index = skb_tx_hash(dev, skb);
 +
 +              if (queue_index != new_index && sk &&
 +                  rcu_access_pointer(sk->sk_dst_cache))
 +                      sk_tx_queue_set(sk, new_index);
 +
 +              queue_index = new_index;
 +      }
 +
 +      return queue_index;
 +}
 +
 +struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 +                                  struct sk_buff *skb,
 +                                  void *accel_priv)
 +{
 +      int queue_index = 0;
 +
 +#ifdef CONFIG_XPS
 +      if (skb->sender_cpu == 0)
 +              skb->sender_cpu = raw_smp_processor_id() + 1;
 +#endif
 +
 +      if (dev->real_num_tx_queues != 1) {
 +              const struct net_device_ops *ops = dev->netdev_ops;
 +              if (ops->ndo_select_queue)
 +                      queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
 +                                                          __netdev_pick_tx);
 +              else
 +                      queue_index = __netdev_pick_tx(dev, skb);
 +
 +              if (!accel_priv)
 +                      queue_index = netdev_cap_txqueue(dev, queue_index);
 +      }
 +
 +      skb_set_queue_mapping(skb, queue_index);
 +      return netdev_get_tx_queue(dev, queue_index);
 +}
 +
  /**
   *    __dev_queue_xmit - transmit a buffer
   *    @skb: buffer to transmit
@@@ -3631,47 -3513,66 +3624,47 @@@ int (*br_fdb_test_addr_hook)(struct net
  EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
  #endif
  
 -#ifdef CONFIG_NET_CLS_ACT
 -/* TODO: Maybe we should just force sch_ingress to be compiled in
 - * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
 - * a compare and 2 stores extra right now if we dont have it on
 - * but have CONFIG_NET_CLS_ACT
 - * NOTE: This doesn't stop any functionality; if you dont have
 - * the ingress scheduler, you just can't add policies on ingress.
 - *
 - */
 -static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
 -{
 -      struct net_device *dev = skb->dev;
 -      u32 ttl = G_TC_RTTL(skb->tc_verd);
 -      int result = TC_ACT_OK;
 -      struct Qdisc *q;
 -
 -      if (unlikely(MAX_RED_LOOP < ttl++)) {
 -              net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
 -                                   skb->skb_iif, dev->ifindex);
 -              return TC_ACT_SHOT;
 -      }
 -
 -      skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
 -      skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
 -
 -      q = rcu_dereference(rxq->qdisc);
 -      if (q != &noop_qdisc) {
 -              spin_lock(qdisc_lock(q));
 -              if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
 -                      result = qdisc_enqueue_root(skb, q);
 -              spin_unlock(qdisc_lock(q));
 -      }
 -
 -      return result;
 -}
 -
  static inline struct sk_buff *handle_ing(struct sk_buff *skb,
                                         struct packet_type **pt_prev,
                                         int *ret, struct net_device *orig_dev)
  {
 -      struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
 +#ifdef CONFIG_NET_CLS_ACT
 +      struct tcf_proto *cl = rcu_dereference_bh(skb->dev->ingress_cl_list);
 +      struct tcf_result cl_res;
  
 -      if (!rxq || rcu_access_pointer(rxq->qdisc) == &noop_qdisc)
 +      /* If there's at least one ingress present somewhere (so
 +       * we get here via enabled static key), remaining devices
 +       * that are not configured with an ingress qdisc will bail
 +       * out here.
 +       */
 +      if (!cl)
                return skb;
 -
        if (*pt_prev) {
                *ret = deliver_skb(skb, *pt_prev, orig_dev);
                *pt_prev = NULL;
        }
  
 -      switch (ing_filter(skb, rxq)) {
 +      qdisc_skb_cb(skb)->pkt_len = skb->len;
 +      skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
 +      qdisc_bstats_update_cpu(cl->q, skb);
 +
 +      switch (tc_classify(skb, cl, &cl_res)) {
 +      case TC_ACT_OK:
 +      case TC_ACT_RECLASSIFY:
 +              skb->tc_index = TC_H_MIN(cl_res.classid);
 +              break;
        case TC_ACT_SHOT:
 +              qdisc_qstats_drop_cpu(cl->q);
        case TC_ACT_STOLEN:
 +      case TC_ACT_QUEUED:
                kfree_skb(skb);
                return NULL;
 +      default:
 +              break;
        }
 -
 +#endif /* CONFIG_NET_CLS_ACT */
        return skb;
  }
 -#endif
  
  /**
   *    netdev_rx_handler_register - register receive handler
@@@ -3744,22 -3645,6 +3737,22 @@@ static bool skb_pfmemalloc_protocol(str
        }
  }
  
 +static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
 +                           int *ret, struct net_device *orig_dev)
 +{
 +#ifdef CONFIG_NETFILTER_INGRESS
 +      if (nf_hook_ingress_active(skb)) {
 +              if (*pt_prev) {
 +                      *ret = deliver_skb(skb, *pt_prev, orig_dev);
 +                      *pt_prev = NULL;
 +              }
 +
 +              return nf_hook_ingress(skb);
 +      }
 +#endif /* CONFIG_NETFILTER_INGRESS */
 +      return 0;
 +}
 +
  static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
  {
        struct packet_type *ptype, *pt_prev;
@@@ -3819,17 -3704,13 +3812,17 @@@ another_round
        }
  
  skip_taps:
 -#ifdef CONFIG_NET_CLS_ACT
 +#ifdef CONFIG_NET_INGRESS
        if (static_key_false(&ingress_needed)) {
                skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
                if (!skb)
                        goto unlock;
 -      }
  
 +              if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
 +                      goto unlock;
 +      }
 +#endif
 +#ifdef CONFIG_NET_CLS_ACT
        skb->tc_verd = 0;
  ncls:
  #endif
@@@ -6432,17 -6313,6 +6425,17 @@@ static int netif_alloc_netdev_queues(st
        return 0;
  }
  
 +void netif_tx_stop_all_queues(struct net_device *dev)
 +{
 +      unsigned int i;
 +
 +      for (i = 0; i < dev->num_tx_queues; i++) {
 +              struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 +              netif_tx_stop_queue(txq);
 +      }
 +}
 +EXPORT_SYMBOL(netif_tx_stop_all_queues);
 +
  /**
   *    register_netdevice      - register a network device
   *    @dev: device to register
@@@ -6992,9 -6862,6 +6985,9 @@@ struct net_device *alloc_netdev_mqs(in
        dev->group = INIT_NETDEV_GROUP;
        if (!dev->ethtool_ops)
                dev->ethtool_ops = &default_ethtool_ops;
 +
 +      nf_hook_ingress_init(dev);
 +
        return dev;
  
  free_all: