Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author	David S. Miller <davem@davemloft.net>
	Fri, 21 Aug 2015 18:44:04 +0000 (11:44 -0700)
committer	David S. Miller <davem@davemloft.net>
	Fri, 21 Aug 2015 18:44:04 +0000 (11:44 -0700)
Conflicts:
	drivers/net/usb/qmi_wwan.c

Overlapping additions of new device IDs to drivers/net/usb/qmi_wwan.c; the resolution keeps the entries from both branches (see the sketch below).

Signed-off-by: David S. Miller <davem@davemloft.net>
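
A minimal sketch of the resolution, for reference: both branches appended
QMI_FIXED_INTF() entries to the usb_device_id product table in qmi_wwan.c,
so the merge keeps both lines (surrounding entries elided):

    static const struct usb_device_id products[] = {
            /* ... existing entries ... */
            {QMI_FIXED_INTF(0x413c, 0x81b1, 8)},    /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
            {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},    /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
            /* ... remaining entries ... */
    };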
12 files changed:
MAINTAINERS
arch/arm/boot/dts/dra7.dtsi
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/phy/phy.c
drivers/net/usb/qmi_wwan.c
kernel/events/core.c
net/batman-adv/translation-table.c
net/bridge/br_multicast.c
net/ipv4/fib_trie.c
net/ipv6/ip6_fib.c
net/ipv6/route.c

diff --combined MAINTAINERS
index 7b528b8c73f8c613dbb6c1dd93f1419266006ee2,569568f6644f2092211b7bb2690c7defe49977dd..7b1b552630b8ed6a65b4ec60b6cef8d00cdd1552
@@@ -158,7 -158,6 +158,7 @@@ L: linux-wpan@vger.kernel.org
  S:    Maintained
  F:    net/6lowpan/
  F:    include/net/6lowpan.h
 +F:    Documentation/networking/6lowpan.txt
  
  6PACK NETWORK DRIVER FOR AX.25
  M:    Andreas Koensgen <ajk@comnets.uni-bremen.de>
@@@ -3588,6 -3587,15 +3588,15 @@@ S:    Maintained
  F:    drivers/gpu/drm/rockchip/
  F:    Documentation/devicetree/bindings/video/rockchip*
  
+ DRM DRIVERS FOR STI
+ M:    Benjamin Gaignard <benjamin.gaignard@linaro.org>
+ M:    Vincent Abriou <vincent.abriou@st.com>
+ L:    dri-devel@lists.freedesktop.org
+ T:    git http://git.linaro.org/people/benjamin.gaignard/kernel.git
+ S:    Maintained
+ F:    drivers/gpu/drm/sti
+ F:    Documentation/devicetree/bindings/gpu/st,stih4xx.txt
+
  DSBR100 USB FM RADIO DRIVER
  M:    Alexey Klimov <klimov.linux@gmail.com>
  L:    linux-media@vger.kernel.org
@@@ -6511,7 -6519,7 +6520,7 @@@ F:      drivers/net/ethernet/marvell/mvneta.
  
  MARVELL MWIFIEX WIRELESS DRIVER
  M:    Amitkumar Karwar <akarwar@marvell.com>
 -M:    Avinash Patil <patila@marvell.com>
 +M:    Nishant Sarmukadam <nishants@marvell.com>
  L:    linux-wireless@vger.kernel.org
  S:    Maintained
  F:    drivers/net/wireless/mwifiex/
@@@ -6649,15 -6657,6 +6658,15 @@@ W:    http://www.mellanox.com
  Q:    http://patchwork.ozlabs.org/project/netdev/list/
  F:    drivers/net/ethernet/mellanox/mlx4/en_*
  
 +MELLANOX ETHERNET SWITCH DRIVERS
 +M:    Jiri Pirko <jiri@mellanox.com>
 +M:    Ido Schimmel <idosch@mellanox.com>
 +L:    netdev@vger.kernel.org
 +S:    Supported
 +W:    http://www.mellanox.com
 +Q:    http://patchwork.ozlabs.org/project/netdev/list/
 +F:    drivers/net/ethernet/mellanox/mlxsw/
 +
  MEMORY MANAGEMENT
  L:    linux-mm@kvack.org
  W:    http://www.linux-mm.org
@@@ -8922,13 -8921,6 +8931,13 @@@ F:    include/linux/dma/dw.h
  F:    include/linux/platform_data/dma-dw.h
  F:    drivers/dma/dw/
  
 +SYNOPSYS DESIGNWARE ETHERNET QOS 4.10a driver
 +M: Lars Persson <lars.persson@axis.com>
 +L: netdev@vger.kernel.org
 +S: Supported
 +F: Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
 +F: drivers/net/ethernet/synopsys/dwc_eth_qos.c
 +
  SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER
  M:    Seungwon Jeon <tgih.jun@samsung.com>
  M:    Jaehoon Chung <jh80.chung@samsung.com>
index 0001e959bf49ad153bf50be8e2d6a515d75988a6,1e29ccf77ea24f56fd16f8960d2b585ccc7ce9fc..6dbbc02d18b4d635dc10b21cc2f73b04b8490e49
                                ranges = <0 0x2000 0x2000>;
  
                                scm_conf: scm_conf@0 {
-                                       compatible = "syscon";
+                                       compatible = "syscon", "simple-bus";
                                        reg = <0x0 0x1400>;
                                        #address-cells = <1>;
                                        #size-cells = <1>;
                };
  
                mac: ethernet@4a100000 {
 -                      compatible = "ti,cpsw";
 +                      compatible = "ti,dra7-cpsw","ti,cpsw";
                        ti,hwmods = "gmac";
                        clocks = <&dpll_gmac_ck>, <&gmac_gmii_ref_clk_div>;
                        clock-names = "fck", "cpts";
index 15cc3a1f12ff74b51af2f6139671c23dde996635,6ca693b03f33abbbdb6097544f2f4ee7f3928720..12687bf52b9518eaa1c4bb538ff26fcc88ce7acc
@@@ -681,14 -681,11 +681,14 @@@ void be_link_status_update(struct be_ad
  static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
  {
        struct be_tx_stats *stats = tx_stats(txo);
 +      u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
  
        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_bytes += skb->len;
 -      stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
 +      stats->tx_pkts += tx_pkts;
 +      if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
 +              stats->tx_vxlan_offload_pkts += tx_pkts;
        u64_stats_update_end(&stats->sync);
  }
  
@@@ -1261,7 -1258,7 +1261,7 @@@ static bool be_send_pkt_to_bmc(struct b
        if (is_udp_pkt((*skb))) {
                struct udphdr *udp = udp_hdr((*skb));
  
 -              switch (udp->dest) {
 +              switch (ntohs(udp->dest)) {
                case DHCP_CLIENT_PORT:
                        os2bmc = is_dhcp_client_filt_enabled(adapter);
                        goto done;
@@@ -1964,8 -1961,6 +1964,8 @@@ static void be_rx_stats_update(struct b
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
 +      if (rxcp->tunneled)
 +              stats->rx_vxlan_offload_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
  
  static int be_setup_wol(struct be_adapter *adapter, bool enable)
  {
 +      struct device *dev = &adapter->pdev->dev;
        struct be_dma_mem cmd;
 -      int status = 0;
        u8 mac[ETH_ALEN];
 +      int status;
  
        eth_zero_addr(mac);
  
        cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
 -      cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
 -                                   GFP_KERNEL);
 +      cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
        if (!cmd.va)
                return -ENOMEM;
  
                                                PCICFG_PM_CONTROL_OFFSET,
                                                PCICFG_PM_CONTROL_MASK);
                if (status) {
 -                      dev_err(&adapter->pdev->dev,
 -                              "Could not enable Wake-on-lan\n");
 -                      dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
 -                                        cmd.dma);
 -                      return status;
 +                      dev_err(dev, "Could not enable Wake-on-lan\n");
 +                      goto err;
                }
 -              status = be_cmd_enable_magic_wol(adapter,
 -                                               adapter->netdev->dev_addr,
 -                                               &cmd);
 -              pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
 -              pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
        } else {
 -              status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
 -              pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
 -              pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
 +              ether_addr_copy(mac, adapter->netdev->dev_addr);
        }
  
 -      dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
 +      status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
 +      pci_enable_wake(adapter->pdev, PCI_D3hot, enable);
 +      pci_enable_wake(adapter->pdev, PCI_D3cold, enable);
 +err:
 +      dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma);
        return status;
  }
  
@@@ -4976,7 -4977,7 +4976,7 @@@ static bool be_check_ufi_compatibility(
  {
        if (!fhdr) {
                dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
 -              return -1;
 +              return false;
        }
  
        /* First letter of the build version is used to identify
@@@ -5131,6 -5132,9 +5131,6 @@@ static int be_ndo_bridge_getlink(struc
        int status = 0;
        u8 hsw_mode;
  
 -      if (!sriov_enabled(adapter))
 -              return 0;
 -
        /* BE and Lancer chips support VEB mode only */
        if (BEx_chip(adapter) || lancer_chip(adapter)) {
                hsw_mode = PORT_FWD_TYPE_VEB;
                                               NULL);
                if (status)
                        return 0;
 +
 +              if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
 +                      return 0;
        }
  
        return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
@@@ -5173,7 -5174,7 +5173,7 @@@ static void be_add_vxlan_port(struct ne
        struct device *dev = &adapter->pdev->dev;
        int status;
  
-       if (lancer_chip(adapter) || BEx_chip(adapter))
+       if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
                return;
  
        if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
@@@ -5220,7 -5221,7 +5220,7 @@@ static void be_del_vxlan_port(struct ne
  {
        struct be_adapter *adapter = netdev_priv(netdev);
  
-       if (lancer_chip(adapter) || BEx_chip(adapter))
+       if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
                return;
  
        if (adapter->vxlan_port != port)
@@@ -5277,27 -5278,6 +5277,27 @@@ static netdev_features_t be_features_ch
  }
  #endif
  
 +static int be_get_phys_port_id(struct net_device *dev,
 +                             struct netdev_phys_item_id *ppid)
 +{
 +      int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
 +      struct be_adapter *adapter = netdev_priv(dev);
 +      u8 *id;
 +
 +      if (MAX_PHYS_ITEM_ID_LEN < id_len)
 +              return -ENOSPC;
 +
 +      ppid->id[0] = adapter->hba_port_num + 1;
 +      id = &ppid->id[1];
 +      for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
 +           i--, id += CNTL_SERIAL_NUM_WORD_SZ)
 +              memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);
 +
 +      ppid->id_len = id_len;
 +
 +      return 0;
 +}
 +
  static const struct net_device_ops be_netdev_ops = {
        .ndo_open               = be_open,
        .ndo_stop               = be_close,
        .ndo_del_vxlan_port     = be_del_vxlan_port,
        .ndo_features_check     = be_features_check,
  #endif
 +      .ndo_get_phys_port_id   = be_get_phys_port_id,
  };
  
  static void be_netdev_init(struct net_device *netdev)
@@@ -5887,6 -5866,7 +5887,6 @@@ static int be_pci_resume(struct pci_de
        if (status)
                return status;
  
 -      pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
  
        status = be_resume(adapter);
@@@ -5966,6 -5946,7 +5966,6 @@@ static pci_ers_result_t be_eeh_reset(st
                return PCI_ERS_RESULT_DISCONNECT;
  
        pci_set_master(pdev);
 -      pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
  
        /* Check if card is ok and fw is ready */
index 087ffcdc48a312d365ffb24ee4f7c16ddcf18edb,10b3bbbbac8e10e89c47b2d50bb661cc80ad616b..4b69d061d90f7983fb0ee4929b7f6074922d3690
  
  #define TX_TIMEOUT      (1*HZ)
  
 -const char gfar_driver_version[] = "1.3";
 +const char gfar_driver_version[] = "2.0";
  
  static int gfar_enet_open(struct net_device *dev);
  static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
  static void gfar_reset_task(struct work_struct *work);
  static void gfar_timeout(struct net_device *dev);
  static int gfar_close(struct net_device *dev);
 -static struct sk_buff *gfar_new_skb(struct net_device *dev,
 -                                  dma_addr_t *bufaddr);
 +static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
 +                              int alloc_cnt);
  static int gfar_set_mac_address(struct net_device *dev);
  static int gfar_change_mtu(struct net_device *dev, int new_mtu);
  static irqreturn_t gfar_error(int irq, void *dev_id);
@@@ -141,7 -141,8 +141,7 @@@ static void gfar_netpoll(struct net_dev
  #endif
  int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
  static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
 -static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
 -                             int amount_pull, struct napi_struct *napi);
 +static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb);
  static void gfar_halt_nodisable(struct gfar_private *priv);
  static void gfar_clear_exact_match(struct net_device *dev);
  static void gfar_set_mac_for_addr(struct net_device *dev, int num,
@@@ -168,15 -169,17 +168,15 @@@ static void gfar_init_rxbdp(struct gfar
        bdp->lstatus = cpu_to_be32(lstatus);
  }
  
 -static int gfar_init_bds(struct net_device *ndev)
 +static void gfar_init_bds(struct net_device *ndev)
  {
        struct gfar_private *priv = netdev_priv(ndev);
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        struct gfar_priv_tx_q *tx_queue = NULL;
        struct gfar_priv_rx_q *rx_queue = NULL;
        struct txbd8 *txbdp;
 -      struct rxbd8 *rxbdp;
        u32 __iomem *rfbptr;
        int i, j;
 -      dma_addr_t bufaddr;
  
        for (i = 0; i < priv->num_tx_queues; i++) {
                tx_queue = priv->tx_queue[i];
        rfbptr = &regs->rfbptr0;
        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_queue = priv->rx_queue[i];
 -              rx_queue->cur_rx = rx_queue->rx_bd_base;
 -              rx_queue->skb_currx = 0;
 -              rxbdp = rx_queue->rx_bd_base;
  
 -              for (j = 0; j < rx_queue->rx_ring_size; j++) {
 -                      struct sk_buff *skb = rx_queue->rx_skbuff[j];
 +              rx_queue->next_to_clean = 0;
 +              rx_queue->next_to_use = 0;
 +              rx_queue->next_to_alloc = 0;
  
 -                      if (skb) {
 -                              bufaddr = be32_to_cpu(rxbdp->bufPtr);
 -                      } else {
 -                              skb = gfar_new_skb(ndev, &bufaddr);
 -                              if (!skb) {
 -                                      netdev_err(ndev, "Can't allocate RX buffers\n");
 -                                      return -ENOMEM;
 -                              }
 -                              rx_queue->rx_skbuff[j] = skb;
 -                      }
 -
 -                      gfar_init_rxbdp(rx_queue, rxbdp, bufaddr);
 -                      rxbdp++;
 -              }
 +              /* make sure next_to_clean != next_to_use after this
 +               * by leaving at least 1 unused descriptor
 +               */
 +              gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
  
                rx_queue->rfbptr = rfbptr;
                rfbptr += 2;
        }
 -
 -      return 0;
  }
  
  static int gfar_alloc_skb_resources(struct net_device *ndev)
  {
        void *vaddr;
        dma_addr_t addr;
 -      int i, j, k;
 +      int i, j;
        struct gfar_private *priv = netdev_priv(ndev);
        struct device *dev = priv->dev;
        struct gfar_priv_tx_q *tx_queue = NULL;
                rx_queue = priv->rx_queue[i];
                rx_queue->rx_bd_base = vaddr;
                rx_queue->rx_bd_dma_base = addr;
 -              rx_queue->dev = ndev;
 +              rx_queue->ndev = ndev;
 +              rx_queue->dev = dev;
                addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
                vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
        }
                if (!tx_queue->tx_skbuff)
                        goto cleanup;
  
 -              for (k = 0; k < tx_queue->tx_ring_size; k++)
 -                      tx_queue->tx_skbuff[k] = NULL;
 +              for (j = 0; j < tx_queue->tx_ring_size; j++)
 +                      tx_queue->tx_skbuff[j] = NULL;
        }
  
        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_queue = priv->rx_queue[i];
 -              rx_queue->rx_skbuff =
 -                      kmalloc_array(rx_queue->rx_ring_size,
 -                                    sizeof(*rx_queue->rx_skbuff),
 -                                    GFP_KERNEL);
 -              if (!rx_queue->rx_skbuff)
 +              rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
 +                                          sizeof(*rx_queue->rx_buff),
 +                                          GFP_KERNEL);
 +              if (!rx_queue->rx_buff)
                        goto cleanup;
 -
 -              for (j = 0; j < rx_queue->rx_ring_size; j++)
 -                      rx_queue->rx_skbuff[j] = NULL;
        }
  
 -      if (gfar_init_bds(ndev))
 -              goto cleanup;
 +      gfar_init_bds(ndev);
  
        return 0;
  
@@@ -333,8 -354,10 +333,8 @@@ static void gfar_init_rqprm(struct gfar
        }
  }
  
 -static void gfar_rx_buff_size_config(struct gfar_private *priv)
 +static void gfar_rx_offload_en(struct gfar_private *priv)
  {
 -      int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN;
 -
        /* set this when rx hw offload (TOE) functions are being used */
        priv->uses_rxfcb = 0;
  
  
        if (priv->hwts_rx_en)
                priv->uses_rxfcb = 1;
 -
 -      if (priv->uses_rxfcb)
 -              frame_size += GMAC_FCB_LEN;
 -
 -      frame_size += priv->padding;
 -
 -      frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
 -                   INCREMENTAL_BUFFER_SIZE;
 -
 -      priv->rx_buffer_size = frame_size;
  }
  
  static void gfar_mac_rx_config(struct gfar_private *priv)
@@@ -560,8 -593,9 +560,8 @@@ static int gfar_alloc_rx_queues(struct 
                if (!priv->rx_queue[i])
                        return -ENOMEM;
  
 -              priv->rx_queue[i]->rx_skbuff = NULL;
                priv->rx_queue[i]->qindex = i;
 -              priv->rx_queue[i]->dev = priv->ndev;
 +              priv->rx_queue[i]->ndev = priv->ndev;
        }
        return 0;
  }
@@@ -1153,11 -1187,12 +1153,11 @@@ void gfar_mac_reset(struct gfar_privat
  
        udelay(3);
  
 -      /* Compute rx_buff_size based on config flags */
 -      gfar_rx_buff_size_config(priv);
 +      gfar_rx_offload_en(priv);
  
        /* Initialize the max receive frame/buffer lengths */
 -      gfar_write(&regs->maxfrm, priv->rx_buffer_size);
 -      gfar_write(&regs->mrblr, priv->rx_buffer_size);
 +      gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
 +      gfar_write(&regs->mrblr, GFAR_RXB_SIZE);
  
        /* Initialize the Minimum Frame Length Register */
        gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
        /* Initialize MACCFG2. */
        tempval = MACCFG2_INIT_SETTINGS;
  
 -      /* If the mtu is larger than the max size for standard
 -       * ethernet frames (ie, a jumbo frame), then set maccfg2
 -       * to allow huge frames, and to check the length
 +      /* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
 +       * are marked as truncated.  Avoid this by MACCFG2[Huge Frame]=1,
 +       * and by checking RxBD[LG] and discarding larger than MAXFRM.
         */
 -      if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
 -          gfar_has_errata(priv, GFAR_ERRATA_74))
 +      if (gfar_has_errata(priv, GFAR_ERRATA_74))
                tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
  
        gfar_write(&regs->maccfg2, tempval);
@@@ -1379,6 -1415,8 +1379,6 @@@ static int gfar_probe(struct platform_d
            priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
                dev->needed_headroom = GMAC_FCB_LEN;
  
 -      priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
 -
        /* Initializing some of the rx/tx queue level parameters */
        for (i = 0; i < priv->num_tx_queues; i++) {
                priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
@@@ -1561,7 -1599,10 +1561,7 @@@ static int gfar_restore(struct device *
                return 0;
        }
  
 -      if (gfar_init_bds(ndev)) {
 -              free_skb_resources(priv);
 -              return -ENOMEM;
 -      }
 +      gfar_init_bds(ndev);
  
        gfar_mac_reset(priv);
  
@@@ -1852,32 -1893,26 +1852,32 @@@ static void free_skb_tx_queue(struct gf
  
  static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
  {
 -      struct rxbd8 *rxbdp;
 -      struct gfar_private *priv = netdev_priv(rx_queue->dev);
        int i;
  
 -      rxbdp = rx_queue->rx_bd_base;
 +      struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
 +
 +      if (rx_queue->skb)
 +              dev_kfree_skb(rx_queue->skb);
  
        for (i = 0; i < rx_queue->rx_ring_size; i++) {
 -              if (rx_queue->rx_skbuff[i]) {
 -                      dma_unmap_single(priv->dev, be32_to_cpu(rxbdp->bufPtr),
 -                                       priv->rx_buffer_size,
 -                                       DMA_FROM_DEVICE);
 -                      dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
 -                      rx_queue->rx_skbuff[i] = NULL;
 -              }
 +              struct  gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
 +
                rxbdp->lstatus = 0;
                rxbdp->bufPtr = 0;
                rxbdp++;
 +
 +              if (!rxb->page)
 +                      continue;
 +
 +              dma_unmap_single(rx_queue->dev, rxb->dma,
 +                               PAGE_SIZE, DMA_FROM_DEVICE);
 +              __free_page(rxb->page);
 +
 +              rxb->page = NULL;
        }
 -      kfree(rx_queue->rx_skbuff);
 -      rx_queue->rx_skbuff = NULL;
 +
 +      kfree(rx_queue->rx_buff);
 +      rx_queue->rx_buff = NULL;
  }
  
  /* If there are any tx skbs or rx skbs still around, free them.
@@@ -1902,7 -1937,7 +1902,7 @@@ static void free_skb_resources(struct g
  
        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_queue = priv->rx_queue[i];
 -              if (rx_queue->rx_skbuff)
 +              if (rx_queue->rx_buff)
                        free_skb_rx_queue(rx_queue);
        }
  
@@@ -2067,6 -2102,11 +2067,11 @@@ int startup_gfar(struct net_device *nde
        /* Start Rx/Tx DMA and enable the interrupts */
        gfar_start(priv);
  
+       /* force link state update after mac reset */
+       priv->oldlink = 0;
+       priv->oldspeed = 0;
+       priv->oldduplex = -1;
+
        phy_start(priv->phydev);
  
        enable_napi(priv);
@@@ -2460,7 -2500,7 +2465,7 @@@ static int gfar_change_mtu(struct net_d
        struct gfar_private *priv = netdev_priv(dev);
        int frame_size = new_mtu + ETH_HLEN;
  
 -      if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
 +      if ((frame_size < 64) || (frame_size > GFAR_JUMBO_FRAME_SIZE)) {
                netif_err(priv, drv, dev, "Invalid MTU setting\n");
                return -EINVAL;
        }
@@@ -2514,6 -2554,15 +2519,6 @@@ static void gfar_timeout(struct net_dev
        schedule_work(&priv->reset_task);
  }
  
 -static void gfar_align_skb(struct sk_buff *skb)
 -{
 -      /* We need the data buffer to be aligned properly.  We will reserve
 -       * as many bytes as needed to align the data properly
 -       */
 -      skb_reserve(skb, RXBUF_ALIGNMENT -
 -                  (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
 -}
 -
  /* Interrupt Handler for Transmit complete */
  static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
  {
  
                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
                        struct skb_shared_hwtstamps shhwtstamps;
 -                      u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
 +                      u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
 +                                        ~0x7UL);
  
                        memset(&shhwtstamps, 0, sizeof(shhwtstamps));
                        shhwtstamps.hwtstamp = ns_to_ktime(*ns);
        netdev_tx_completed_queue(txq, howmany, bytes_sent);
  }
  
 -static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
 +static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
  {
 -      struct gfar_private *priv = netdev_priv(dev);
 -      struct sk_buff *skb;
 +      struct page *page;
 +      dma_addr_t addr;
  
 -      skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
 -      if (!skb)
 -              return NULL;
 +      page = dev_alloc_page();
 +      if (unlikely(!page))
 +              return false;
  
 -      gfar_align_skb(skb);
 +      addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
 +      if (unlikely(dma_mapping_error(rxq->dev, addr))) {
 +              __free_page(page);
  
 -      return skb;
 +              return false;
 +      }
 +
 +      rxb->dma = addr;
 +      rxb->page = page;
 +      rxb->page_offset = 0;
 +
 +      return true;
  }
  
 -static struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr)
 +static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
  {
 -      struct gfar_private *priv = netdev_priv(dev);
 -      struct sk_buff *skb;
 -      dma_addr_t addr;
 +      struct gfar_private *priv = netdev_priv(rx_queue->ndev);
 +      struct gfar_extra_stats *estats = &priv->extra_stats;
  
 -      skb = gfar_alloc_skb(dev);
 -      if (!skb)
 -              return NULL;
 +      netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
 +      atomic64_inc(&estats->rx_alloc_err);
 +}
  
 -      addr = dma_map_single(priv->dev, skb->data,
 -                            priv->rx_buffer_size, DMA_FROM_DEVICE);
 -      if (unlikely(dma_mapping_error(priv->dev, addr))) {
 -              dev_kfree_skb_any(skb);
 -              return NULL;
 +static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
 +                              int alloc_cnt)
 +{
 +      struct rxbd8 *bdp;
 +      struct gfar_rx_buff *rxb;
 +      int i;
 +
 +      i = rx_queue->next_to_use;
 +      bdp = &rx_queue->rx_bd_base[i];
 +      rxb = &rx_queue->rx_buff[i];
 +
 +      while (alloc_cnt--) {
 +              /* try reuse page */
 +              if (unlikely(!rxb->page)) {
 +                      if (unlikely(!gfar_new_page(rx_queue, rxb))) {
 +                              gfar_rx_alloc_err(rx_queue);
 +                              break;
 +                      }
 +              }
 +
 +              /* Setup the new RxBD */
 +              gfar_init_rxbdp(rx_queue, bdp,
 +                              rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
 +
 +              /* Update to the next pointer */
 +              bdp++;
 +              rxb++;
 +
 +              if (unlikely(++i == rx_queue->rx_ring_size)) {
 +                      i = 0;
 +                      bdp = rx_queue->rx_bd_base;
 +                      rxb = rx_queue->rx_buff;
 +              }
        }
  
 -      *bufaddr = addr;
 -      return skb;
 +      rx_queue->next_to_use = i;
 +      rx_queue->next_to_alloc = i;
  }
  
 -static inline void count_errors(unsigned short status, struct net_device *dev)
 +static void count_errors(u32 lstatus, struct net_device *ndev)
  {
 -      struct gfar_private *priv = netdev_priv(dev);
 -      struct net_device_stats *stats = &dev->stats;
 +      struct gfar_private *priv = netdev_priv(ndev);
 +      struct net_device_stats *stats = &ndev->stats;
        struct gfar_extra_stats *estats = &priv->extra_stats;
  
        /* If the packet was truncated, none of the other errors matter */
 -      if (status & RXBD_TRUNCATED) {
 +      if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
                stats->rx_length_errors++;
  
                atomic64_inc(&estats->rx_trunc);
                return;
        }
        /* Count the errors, if there were any */
 -      if (status & (RXBD_LARGE | RXBD_SHORT)) {
 +      if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
                stats->rx_length_errors++;
  
 -              if (status & RXBD_LARGE)
 +              if (lstatus & BD_LFLAG(RXBD_LARGE))
                        atomic64_inc(&estats->rx_large);
                else
                        atomic64_inc(&estats->rx_short);
        }
 -      if (status & RXBD_NONOCTET) {
 +      if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
                stats->rx_frame_errors++;
                atomic64_inc(&estats->rx_nonoctet);
        }
 -      if (status & RXBD_CRCERR) {
 +      if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
                atomic64_inc(&estats->rx_crcerr);
                stats->rx_crc_errors++;
        }
 -      if (status & RXBD_OVERRUN) {
 +      if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
                atomic64_inc(&estats->rx_overrun);
 -              stats->rx_crc_errors++;
 +              stats->rx_over_errors++;
        }
  }
  
@@@ -2776,93 -2788,6 +2781,93 @@@ static irqreturn_t gfar_transmit(int ir
        return IRQ_HANDLED;
  }
  
 +static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
 +                           struct sk_buff *skb, bool first)
 +{
 +      unsigned int size = lstatus & BD_LENGTH_MASK;
 +      struct page *page = rxb->page;
 +
 +      /* Remove the FCS from the packet length */
 +      if (likely(lstatus & BD_LFLAG(RXBD_LAST)))
 +              size -= ETH_FCS_LEN;
 +
 +      if (likely(first))
 +              skb_put(skb, size);
 +      else
 +              skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
 +                              rxb->page_offset + RXBUF_ALIGNMENT,
 +                              size, GFAR_RXB_TRUESIZE);
 +
 +      /* try reuse page */
 +      if (unlikely(page_count(page) != 1))
 +              return false;
 +
 +      /* change offset to the other half */
 +      rxb->page_offset ^= GFAR_RXB_TRUESIZE;
 +
 +      atomic_inc(&page->_count);
 +
 +      return true;
 +}
 +
 +static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
 +                             struct gfar_rx_buff *old_rxb)
 +{
 +      struct gfar_rx_buff *new_rxb;
 +      u16 nta = rxq->next_to_alloc;
 +
 +      new_rxb = &rxq->rx_buff[nta];
 +
 +      /* find next buf that can reuse a page */
 +      nta++;
 +      rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
 +
 +      /* copy page reference */
 +      *new_rxb = *old_rxb;
 +
 +      /* sync for use by the device */
 +      dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
 +                                       old_rxb->page_offset,
 +                                       GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
 +}
 +
 +static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
 +                                          u32 lstatus, struct sk_buff *skb)
 +{
 +      struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
 +      struct page *page = rxb->page;
 +      bool first = false;
 +
 +      if (likely(!skb)) {
 +              void *buff_addr = page_address(page) + rxb->page_offset;
 +
 +              skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
 +              if (unlikely(!skb)) {
 +                      gfar_rx_alloc_err(rx_queue);
 +                      return NULL;
 +              }
 +              skb_reserve(skb, RXBUF_ALIGNMENT);
 +              first = true;
 +      }
 +
 +      dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
 +                                    GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
 +
 +      if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
 +              /* reuse the free half of the page */
 +              gfar_reuse_rx_page(rx_queue, rxb);
 +      } else {
 +              /* page cannot be reused, unmap it */
 +              dma_unmap_page(rx_queue->dev, rxb->dma,
 +                             PAGE_SIZE, DMA_FROM_DEVICE);
 +      }
 +
 +      /* clear rxb content */
 +      rxb->page = NULL;
 +
 +      return skb;
 +}
 +
  static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
  {
        /* If valid headers were found, and valid sums
  }
  
  /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
 -static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
 -                             int amount_pull, struct napi_struct *napi)
 +static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
  {
 -      struct gfar_private *priv = netdev_priv(dev);
 +      struct gfar_private *priv = netdev_priv(ndev);
        struct rxfcb *fcb = NULL;
  
        /* fcb is at the beginning if exists */
        /* Remove the FCB from the skb
         * Remove the padded bytes, if there are any
         */
 -      if (amount_pull) {
 -              skb_record_rx_queue(skb, fcb->rq);
 -              skb_pull(skb, amount_pull);
 -      }
 +      if (priv->uses_rxfcb)
 +              skb_pull(skb, GMAC_FCB_LEN);
  
        /* Get receive timestamp from the skb */
        if (priv->hwts_rx_en) {
        if (priv->padding)
                skb_pull(skb, priv->padding);
  
 -      if (dev->features & NETIF_F_RXCSUM)
 +      if (ndev->features & NETIF_F_RXCSUM)
                gfar_rx_checksum(skb, fcb);
  
        /* Tell the skb what kind of packet this is */
 -      skb->protocol = eth_type_trans(skb, dev);
 +      skb->protocol = eth_type_trans(skb, ndev);
  
        /* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here.
         * Even if vlan rx accel is disabled, on some chips
         * RXFCB_VLN is pseudo randomly set.
         */
 -      if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
 +      if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
            be16_to_cpu(fcb->flags) & RXFCB_VLN)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                       be16_to_cpu(fcb->vlctl));
 -
 -      /* Send the packet up the stack */
 -      napi_gro_receive(napi, skb);
 -
  }
  
  /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
   */
  int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
  {
 -      struct net_device *dev = rx_queue->dev;
 -      struct rxbd8 *bdp, *base;
 -      struct sk_buff *skb;
 -      int pkt_len;
 -      int amount_pull;
 -      int howmany = 0;
 -      struct gfar_private *priv = netdev_priv(dev);
 +      struct net_device *ndev = rx_queue->ndev;
 +      struct gfar_private *priv = netdev_priv(ndev);
 +      struct rxbd8 *bdp;
 +      int i, howmany = 0;
 +      struct sk_buff *skb = rx_queue->skb;
 +      int cleaned_cnt = gfar_rxbd_unused(rx_queue);
 +      unsigned int total_bytes = 0, total_pkts = 0;
  
        /* Get the first full descriptor */
 -      bdp = rx_queue->cur_rx;
 -      base = rx_queue->rx_bd_base;
 +      i = rx_queue->next_to_clean;
  
 -      amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;
 +      while (rx_work_limit--) {
 +              u32 lstatus;
 +
 +              if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
 +                      gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
 +                      cleaned_cnt = 0;
 +              }
  
 -      while (!(be16_to_cpu(bdp->status) & RXBD_EMPTY) && rx_work_limit--) {
 -              struct sk_buff *newskb;
 -              dma_addr_t bufaddr;
 +              bdp = &rx_queue->rx_bd_base[i];
 +              lstatus = be32_to_cpu(bdp->lstatus);
 +              if (lstatus & BD_LFLAG(RXBD_EMPTY))
 +                      break;
  
 +              /* order rx buffer descriptor reads */
                rmb();
  
 -              /* Add another skb for the future */
 -              newskb = gfar_new_skb(dev, &bufaddr);
 +              /* fetch next to clean buffer from the ring */
 +              skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
 +              if (unlikely(!skb))
 +                      break;
  
 -              skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
 +              cleaned_cnt++;
 +              howmany++;
  
 -              dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
 -                               priv->rx_buffer_size, DMA_FROM_DEVICE);
 -
 -              if (unlikely(!(be16_to_cpu(bdp->status) & RXBD_ERR) &&
 -                           be16_to_cpu(bdp->length) > priv->rx_buffer_size))
 -                      bdp->status = cpu_to_be16(RXBD_LARGE);
 -
 -              /* We drop the frame if we failed to allocate a new buffer */
 -              if (unlikely(!newskb ||
 -                           !(be16_to_cpu(bdp->status) & RXBD_LAST) ||
 -                           be16_to_cpu(bdp->status) & RXBD_ERR)) {
 -                      count_errors(be16_to_cpu(bdp->status), dev);
 -
 -                      if (unlikely(!newskb)) {
 -                              newskb = skb;
 -                              bufaddr = be32_to_cpu(bdp->bufPtr);
 -                      } else if (skb)
 -                              dev_kfree_skb(skb);
 -              } else {
 -                      /* Increment the number of packets */
 -                      rx_queue->stats.rx_packets++;
 -                      howmany++;
 -
 -                      if (likely(skb)) {
 -                              pkt_len = be16_to_cpu(bdp->length) -
 -                                        ETH_FCS_LEN;
 -                              /* Remove the FCS from the packet length */
 -                              skb_put(skb, pkt_len);
 -                              rx_queue->stats.rx_bytes += pkt_len;
 -                              skb_record_rx_queue(skb, rx_queue->qindex);
 -                              gfar_process_frame(dev, skb, amount_pull,
 -                                                 &rx_queue->grp->napi_rx);
 +              if (unlikely(++i == rx_queue->rx_ring_size))
 +                      i = 0;
  
 -                      } else {
 -                              netif_warn(priv, rx_err, dev, "Missing skb!\n");
 -                              rx_queue->stats.rx_dropped++;
 -                              atomic64_inc(&priv->extra_stats.rx_skbmissing);
 -                      }
 +              rx_queue->next_to_clean = i;
 +
 +              /* fetch next buffer if not the last in frame */
 +              if (!(lstatus & BD_LFLAG(RXBD_LAST)))
 +                      continue;
 +
 +              if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
 +                      count_errors(lstatus, ndev);
  
 +                      /* discard faulty buffer */
 +                      dev_kfree_skb(skb);
 +                      skb = NULL;
 +                      rx_queue->stats.rx_dropped++;
 +                      continue;
                }
  
 -              rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
 +              /* Increment the number of packets */
 +              total_pkts++;
 +              total_bytes += skb->len;
  
 -              /* Setup the new bdp */
 -              gfar_init_rxbdp(rx_queue, bdp, bufaddr);
 +              skb_record_rx_queue(skb, rx_queue->qindex);
  
 -              /* Update Last Free RxBD pointer for LFC */
 -              if (unlikely(rx_queue->rfbptr && priv->tx_actual_en))
 -                      gfar_write(rx_queue->rfbptr, (u32)bdp);
 +              gfar_process_frame(ndev, skb);
  
 -              /* Update to the next pointer */
 -              bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
 +              /* Send the packet up the stack */
 +              napi_gro_receive(&rx_queue->grp->napi_rx, skb);
  
 -              /* update to point at the next skb */
 -              rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
 -                                    RX_RING_MOD_MASK(rx_queue->rx_ring_size);
 +              skb = NULL;
        }
  
 -      /* Update the current rxbd pointer to be the next one */
 -      rx_queue->cur_rx = bdp;
 +      /* Store incomplete frames for completion */
 +      rx_queue->skb = skb;
 +
 +      rx_queue->stats.rx_packets += total_pkts;
 +      rx_queue->stats.rx_bytes += total_bytes;
 +
 +      if (cleaned_cnt)
 +              gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
 +
 +      /* Update Last Free RxBD pointer for LFC */
 +      if (unlikely(priv->tx_actual_en)) {
 +              u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
 +
 +              gfar_write(rx_queue->rfbptr, bdp_dma);
 +      }
  
        return howmany;
  }
@@@ -3525,6 -3459,7 +3530,6 @@@ static noinline void gfar_update_link_s
        struct phy_device *phydev = priv->phydev;
        struct gfar_priv_rx_q *rx_queue = NULL;
        int i;
 -      struct rxbd8 *bdp;
  
        if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
                return;
                /* Turn last free buffer recording on */
                if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
                        for (i = 0; i < priv->num_rx_queues; i++) {
 +                              u32 bdp_dma;
 +
                                rx_queue = priv->rx_queue[i];
 -                              bdp = rx_queue->cur_rx;
 -                              /* skip to previous bd */
 -                              bdp = skip_bd(bdp, rx_queue->rx_ring_size - 1,
 -                                            rx_queue->rx_bd_base,
 -                                            rx_queue->rx_ring_size);
 -
 -                              if (rx_queue->rfbptr)
 -                                      gfar_write(rx_queue->rfbptr, (u32)bdp);
 +                              bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
 +                              gfar_write(rx_queue->rfbptr, bdp_dma);
                        }
  
                        priv->tx_actual_en = 1;
diff --combined drivers/net/phy/phy.c
index 84b1fba58ac3c8efcbbb0bf9311b442ac52614c1,1e1fbb049ec63b79c1d8255c517364f739a8d38b..d9728516dac32d935e9b19ffacf2d9431d81d448
@@@ -353,8 -353,6 +353,8 @@@ int phy_ethtool_sset(struct phy_device 
  
        phydev->duplex = cmd->duplex;
  
 +      phydev->mdix = cmd->eth_tp_mdix_ctrl;
 +
        /* Restart the PHY */
        phy_start_aneg(phydev);
  
@@@ -379,7 -377,6 +379,7 @@@ int phy_ethtool_gset(struct phy_device 
        cmd->transceiver = phy_is_internal(phydev) ?
                XCVR_INTERNAL : XCVR_EXTERNAL;
        cmd->autoneg = phydev->autoneg;
 +      cmd->eth_tp_mdix_ctrl = phydev->mdix;
  
        return 0;
  }
@@@ -814,6 -811,7 +814,7 @@@ void phy_state_machine(struct work_stru
        bool needs_aneg = false, do_suspend = false;
        enum phy_state old_state;
        int err = 0;
+       int old_link;
  
        mutex_lock(&phydev->lock);
  
                phydev->adjust_link(phydev->attached_dev);
                break;
        case PHY_RUNNING:
-               /* Only register a CHANGE if we are
-                * polling or ignoring interrupts
+               /* Only register a CHANGE if we are polling or ignoring
+                * interrupts and link changed since latest checking.
                 */
-               if (!phy_interrupt_is_valid(phydev))
-                       phydev->state = PHY_CHANGELINK;
+               if (!phy_interrupt_is_valid(phydev)) {
+                       old_link = phydev->link;
+                       err = phy_read_status(phydev);
+                       if (err)
+                               break;
+                       if (old_link != phydev->link)
+                               phydev->state = PHY_CHANGELINK;
+               }
                break;
        case PHY_CHANGELINK:
                err = phy_read_status(phydev);
index 1f7a7cd97e50277e48487e18eaeafc9406b27f46,64a60afbe50cc4ca0ff12b65ad6331a5fcc5e3a7..6392ae3c4ab82a5c7314ce219575bd09fe995922
@@@ -785,7 -785,7 +785,8 @@@ static const struct usb_device_id produ
        {QMI_FIXED_INTF(0x413c, 0x81a4, 8)},    /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
        {QMI_FIXED_INTF(0x413c, 0x81a8, 8)},    /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
        {QMI_FIXED_INTF(0x413c, 0x81a9, 8)},    /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
 +      {QMI_FIXED_INTF(0x413c, 0x81b1, 8)},    /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
+       {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},    /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
        {QMI_FIXED_INTF(0x03f0, 0x581d, 4)},    /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
  
        /* 4. Gobi 1000 devices */
diff --combined kernel/events/core.c
index e2c6a8886d4d376450b61f8a3347427484ab392d,e6feb51141340a99a248fea0ad1dc17402b0dbdb..a1339b13c578516c64b5a49f39bec55ba8eda989
@@@ -1868,8 -1868,6 +1868,6 @@@ event_sched_in(struct perf_event *event
  
        perf_pmu_disable(event->pmu);
  
-       event->tstamp_running += tstamp - event->tstamp_stopped;
        perf_set_shadow_time(event, ctx, tstamp);
  
        perf_log_itrace_start(event);
                goto out;
        }
  
+       event->tstamp_running += tstamp - event->tstamp_stopped;
        if (!is_software_event(event))
                cpuctx->active_oncpu++;
        if (!ctx->nr_active++)
@@@ -3212,59 -3212,6 +3212,59 @@@ static inline u64 perf_event_count(stru
        return __perf_event_count(event);
  }
  
 +/*
 + * NMI-safe method to read a local event, that is an event that
 + * is:
 + *   - either for the current task, or for this CPU
 + *   - does not have inherit set, for inherited task events
 + *     will not be local and we cannot read them atomically
 + *   - must not have a pmu::count method
 + */
 +u64 perf_event_read_local(struct perf_event *event)
 +{
 +      unsigned long flags;
 +      u64 val;
 +
 +      /*
 +       * Disabling interrupts avoids all counter scheduling (context
 +       * switches, timer based rotation and IPIs).
 +       */
 +      local_irq_save(flags);
 +
 +      /* If this is a per-task event, it must be for current */
 +      WARN_ON_ONCE((event->attach_state & PERF_ATTACH_TASK) &&
 +                   event->hw.target != current);
 +
 +      /* If this is a per-CPU event, it must be for this CPU */
 +      WARN_ON_ONCE(!(event->attach_state & PERF_ATTACH_TASK) &&
 +                   event->cpu != smp_processor_id());
 +
 +      /*
 +       * It must not be an event with inherit set, we cannot read
 +       * all child counters from atomic context.
 +       */
 +      WARN_ON_ONCE(event->attr.inherit);
 +
 +      /*
 +       * It must not have a pmu::count method, those are not
 +       * NMI safe.
 +       */
 +      WARN_ON_ONCE(event->pmu->count);
 +
 +      /*
 +       * If the event is currently on this CPU, its either a per-task event,
 +       * or local to this CPU. Furthermore it means its ACTIVE (otherwise
 +       * oncpu == -1).
 +       */
 +      if (event->oncpu == smp_processor_id())
 +              event->pmu->read(event);
 +
 +      val = local64_read(&event->count);
 +      local_irq_restore(flags);
 +
 +      return val;
 +}
 +
  static u64 perf_event_read(struct perf_event *event)
  {
        /*
@@@ -4011,28 -3958,21 +4011,21 @@@ static void perf_event_for_each(struct 
                perf_event_for_each_child(sibling, func);
  }
  
- static int perf_event_period(struct perf_event *event, u64 __user *arg)
- {
-       struct perf_event_context *ctx = event->ctx;
-       int ret = 0, active;
+ struct period_event {
+       struct perf_event *event;
        u64 value;
+ };
  
-       if (!is_sampling_event(event))
-               return -EINVAL;
-       if (copy_from_user(&value, arg, sizeof(value)))
-               return -EFAULT;
-       if (!value)
-               return -EINVAL;
+ static int __perf_event_period(void *info)
+ {
+       struct period_event *pe = info;
+       struct perf_event *event = pe->event;
+       struct perf_event_context *ctx = event->ctx;
+       u64 value = pe->value;
+       bool active;
  
-       raw_spin_lock_irq(&ctx->lock);
+       raw_spin_lock(&ctx->lock);
        if (event->attr.freq) {
-               if (value > sysctl_perf_event_sample_rate) {
-                       ret = -EINVAL;
-                       goto unlock;
-               }
                event->attr.sample_freq = value;
        } else {
                event->attr.sample_period = value;
                event->pmu->start(event, PERF_EF_RELOAD);
                perf_pmu_enable(ctx->pmu);
        }
+       raw_spin_unlock(&ctx->lock);
  
- unlock:
+       return 0;
+ }
+
+ static int perf_event_period(struct perf_event *event, u64 __user *arg)
+ {
+       struct period_event pe = { .event = event, };
+       struct perf_event_context *ctx = event->ctx;
+       struct task_struct *task;
+       u64 value;
+
+       if (!is_sampling_event(event))
+               return -EINVAL;
+
+       if (copy_from_user(&value, arg, sizeof(value)))
+               return -EFAULT;
+
+       if (!value)
+               return -EINVAL;
+
+       if (event->attr.freq && value > sysctl_perf_event_sample_rate)
+               return -EINVAL;
+
+       task = ctx->task;
+       pe.value = value;
+
+       if (!task) {
+               cpu_function_call(event->cpu, __perf_event_period, &pe);
+               return 0;
+       }
+
+ retry:
+       if (!task_function_call(task, __perf_event_period, &pe))
+               return 0;
+
+       raw_spin_lock_irq(&ctx->lock);
+       if (ctx->is_active) {
+               raw_spin_unlock_irq(&ctx->lock);
+               task = ctx->task;
+               goto retry;
+       }
+
+       __perf_event_period(&pe);
        raw_spin_unlock_irq(&ctx->lock);
  
-       return ret;
+       return 0;
  }
  
  static const struct file_operations perf_fops;
@@@ -4793,12 -4775,20 +4828,20 @@@ static const struct file_operations per
   * to user-space before waking everybody up.
   */
  
+ static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
+ {
+       /* only the parent has fasync state */
+       if (event->parent)
+               event = event->parent;
+       return &event->fasync;
+ }
+
  void perf_event_wakeup(struct perf_event *event)
  {
        ring_buffer_wakeup(event);
  
        if (event->pending_kill) {
-               kill_fasync(&event->fasync, SIGIO, event->pending_kill);
+               kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
                event->pending_kill = 0;
        }
  }
@@@ -6177,7 -6167,7 +6220,7 @@@ static int __perf_event_overflow(struc
        else
                perf_event_output(event, data, regs);
  
-       if (event->fasync && event->pending_kill) {
+       if (*perf_event_fasync(event) && event->pending_kill) {
                event->pending_wakeup = 1;
                irq_work_queue(&event->pending);
        }
@@@ -8627,31 -8617,6 +8670,31 @@@ void perf_event_delayed_put(struct task
                WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
  }
  
 +struct perf_event *perf_event_get(unsigned int fd)
 +{
 +      int err;
 +      struct fd f;
 +      struct perf_event *event;
 +
 +      err = perf_fget_light(fd, &f);
 +      if (err)
 +              return ERR_PTR(err);
 +
 +      event = f.file->private_data;
 +      atomic_long_inc(&event->refcount);
 +      fdput(f);
 +
 +      return event;
 +}
 +
 +const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
 +{
 +      if (!event)
 +              return ERR_PTR(-EINVAL);
 +
 +      return &event->attr;
 +}
 +
  /*
   * inherit a event from parent task to child task:
   */
index db06de20d9962877b35d5de16a38b41f6319c592,5809b39c1922320e8dbb353de212b472199c796d..c1eb7b72ab15fb3a09bf3dc236dc7c83043d51d9
@@@ -19,7 -19,6 +19,7 @@@
  #include "main.h"
  
  #include <linux/atomic.h>
 +#include <linux/bitops.h>
  #include <linux/bug.h>
  #include <linux/byteorder/generic.h>
  #include <linux/compiler.h>
@@@ -596,8 -595,11 +596,11 @@@ bool batadv_tt_local_add(struct net_dev
        /* increase the refcounter of the related vlan */
        vlan = batadv_softif_vlan_get(bat_priv, vid);
        if (WARN(!vlan, "adding TT local entry %pM to non-existent VLAN %d",
-                addr, BATADV_PRINT_VID(vid)))
+                addr, BATADV_PRINT_VID(vid))) {
+               kfree(tt_local);
+               tt_local = NULL;
                goto out;
+       }
  
        batadv_dbg(BATADV_DBG_TT, bat_priv,
                   "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
@@@ -1880,7 -1882,7 +1883,7 @@@ void batadv_tt_global_del_orig(struct b
                }
                spin_unlock_bh(list_lock);
        }
 -      orig_node->capa_initialized &= ~BATADV_ORIG_CAPA_HAS_TT;
 +      clear_bit(BATADV_ORIG_CAPA_HAS_TT, &orig_node->capa_initialized);
  }
  
  static bool batadv_tt_global_to_purge(struct batadv_tt_global_entry *tt_global,
@@@ -2213,7 -2215,7 +2216,7 @@@ static void batadv_tt_req_list_free(str
        spin_lock_bh(&bat_priv->tt.req_list_lock);
  
        list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
 -              list_del(&node->list);
 +              list_del_init(&node->list);
                kfree(node);
        }
  
@@@ -2249,7 -2251,7 +2252,7 @@@ static void batadv_tt_req_purge(struct 
        list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
                if (batadv_has_timed_out(node->issued_at,
                                         BATADV_TT_REQUEST_TIMEOUT)) {
 -                      list_del(&node->list);
 +                      list_del_init(&node->list);
                        kfree(node);
                }
        }
@@@ -2531,8 -2533,7 +2534,8 @@@ out
                batadv_hardif_free_ref(primary_if);
        if (ret && tt_req_node) {
                spin_lock_bh(&bat_priv->tt.req_list_lock);
 -              list_del(&tt_req_node->list);
 +              /* list_del_init() verifies tt_req_node still is in the list */
 +              list_del_init(&tt_req_node->list);
                spin_unlock_bh(&bat_priv->tt.req_list_lock);
                kfree(tt_req_node);
        }
@@@ -2840,7 -2841,7 +2843,7 @@@ static void _batadv_tt_update_changes(s
                                return;
                }
        }
 -      orig_node->capa_initialized |= BATADV_ORIG_CAPA_HAS_TT;
 +      set_bit(BATADV_ORIG_CAPA_HAS_TT, &orig_node->capa_initialized);
  }
  
  static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv,
@@@ -2969,7 -2970,7 +2972,7 @@@ static void batadv_handle_tt_response(s
        list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
                if (!batadv_compare_eth(node->addr, resp_src))
                        continue;
 -              list_del(&node->list);
 +              list_del_init(&node->list);
                kfree(node);
        }
  
@@@ -3342,8 -3343,7 +3345,8 @@@ static void batadv_tt_update_orig(struc
        bool has_tt_init;
  
        tt_vlan = (struct batadv_tvlv_tt_vlan_data *)tt_buff;
 -      has_tt_init = orig_node->capa_initialized & BATADV_ORIG_CAPA_HAS_TT;
 +      has_tt_init = test_bit(BATADV_ORIG_CAPA_HAS_TT,
 +                             &orig_node->capa_initialized);
  
        /* orig table not initialised AND first diff is in the OGM OR the ttvn
         * increased by one -> we can apply the attached changes
index 0752796fe0ba4443036a94a385ef4d1666cd3adc,1285eaf5dc222e7cf75f4da796f2075f098105ae..66efdc21f548524a19f3abc3ea5268b245f88dd2
@@@ -283,8 -283,6 +283,8 @@@ static void br_multicast_del_pg(struct 
                rcu_assign_pointer(*pp, p->next);
                hlist_del_init(&p->mglist);
                del_timer(&p->timer);
 +              br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
 +                            p->state);
                call_rcu_bh(&p->rcu, br_multicast_free_pg);
  
                if (!mp->ports && !mp->mglist &&
@@@ -706,7 -704,7 +706,7 @@@ static int br_multicast_add_group(struc
        if (unlikely(!p))
                goto err;
        rcu_assign_pointer(*pp, p);
 -      br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
 +      br_mdb_notify(br->dev, port, group, RTM_NEWMDB, MDB_TEMPORARY);
  
  found:
        mod_timer(&p->timer, now + br->multicast_membership_interval);
@@@ -766,7 -764,6 +766,7 @@@ static void br_multicast_router_expired
                goto out;
  
        hlist_del_init_rcu(&port->rlist);
 +      br_rtr_notify(br->dev, port, RTM_DELMDB);
  
  out:
        spin_unlock(&br->multicast_lock);
@@@ -927,15 -924,6 +927,15 @@@ void br_multicast_add_port(struct net_b
  
  void br_multicast_del_port(struct net_bridge_port *port)
  {
 +      struct net_bridge *br = port->br;
 +      struct net_bridge_port_group *pg;
 +      struct hlist_node *n;
 +
 +      /* Take care of the remaining groups, only perm ones should be left */
 +      spin_lock_bh(&br->multicast_lock);
 +      hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
 +              br_multicast_del_pg(br, pg);
 +      spin_unlock_bh(&br->multicast_lock);
        del_timer_sync(&port->multicast_router_timer);
  }
  
@@@ -975,13 -963,10 +975,13 @@@ void br_multicast_disable_port(struct n
  
        spin_lock(&br->multicast_lock);
        hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
 -              br_multicast_del_pg(br, pg);
 +              if (pg->state == MDB_TEMPORARY)
 +                      br_multicast_del_pg(br, pg);
  
 -      if (!hlist_unhashed(&port->rlist))
 +      if (!hlist_unhashed(&port->rlist)) {
                hlist_del_init_rcu(&port->rlist);
 +              br_rtr_notify(br->dev, port, RTM_DELMDB);
 +      }
        del_timer(&port->multicast_router_timer);
        del_timer(&port->ip4_own_query.timer);
  #if IS_ENABLED(CONFIG_IPV6)
@@@ -1219,7 -1204,6 +1219,7 @@@ static void br_multicast_add_router(str
                hlist_add_behind_rcu(&port->rlist, slot);
        else
                hlist_add_head_rcu(&port->rlist, &br->router_list);
 +      br_rtr_notify(br->dev, port, RTM_NEWMDB);
  }
  
  static void br_multicast_mark_router(struct net_bridge *br,
@@@ -1453,8 -1437,7 +1453,8 @@@ br_multicast_leave_group(struct net_bri
                        hlist_del_init(&p->mglist);
                        del_timer(&p->timer);
                        call_rcu_bh(&p->rcu, br_multicast_free_pg);
 -                      br_mdb_notify(br->dev, port, group, RTM_DELMDB);
 +                      br_mdb_notify(br->dev, port, group, RTM_DELMDB,
 +                                    p->state);
  
                        if (!mp->ports && !mp->mglist &&
                            netif_running(br->dev))
@@@ -1608,7 -1591,7 +1608,7 @@@ static int br_multicast_ipv4_rcv(struc
                break;
        }
  
-       if (skb_trimmed)
+       if (skb_trimmed && skb_trimmed != skb)
                kfree_skb(skb_trimmed);
  
        return err;
@@@ -1653,7 -1636,7 +1653,7 @@@ static int br_multicast_ipv6_rcv(struc
                break;
        }
  
-       if (skb_trimmed)
+       if (skb_trimmed && skb_trimmed != skb)
                kfree_skb(skb_trimmed);
  
        return err;
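
Both the IGMP (IPv4) and MLD (IPv6) receive hunks above add the skb_trimmed != skb test: the trimming helper can return either the original skb or a freshly allocated copy, and freeing the result unconditionally frees the original a second time further down the path. A userspace sketch of that ownership rule, where maybe_trim() is a hypothetical stand-in for the trimming helper:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* may return the original buffer, or a freshly allocated trimmed copy */
static char *maybe_trim(char *buf, size_t want, size_t have)
{
	char *copy;

	if (want >= have)
		return buf;		/* nothing to trim: hand back the original */

	copy = malloc(want + 1);
	if (!copy)
		return NULL;
	memcpy(copy, buf, want);
	copy[want] = '\0';
	return copy;
}

int main(void)
{
	char *skb = malloc(16);
	char *trimmed;

	strcpy(skb, "payload+padding");
	trimmed = maybe_trim(skb, 7, strlen(skb));

	/* the buggy shape was "if (trimmed) free(trimmed);", which frees
	 * the original twice whenever no copy was made
	 */
	if (trimmed && trimmed != skb)
		free(trimmed);
	free(skb);			/* the original is freed exactly once */
	return 0;
}
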
@@@ -1771,6 -1754,12 +1771,6 @@@ void br_multicast_open(struct net_bridg
  
  void br_multicast_stop(struct net_bridge *br)
  {
 -      struct net_bridge_mdb_htable *mdb;
 -      struct net_bridge_mdb_entry *mp;
 -      struct hlist_node *n;
 -      u32 ver;
 -      int i;
 -
        del_timer_sync(&br->multicast_router_timer);
        del_timer_sync(&br->ip4_other_query.timer);
        del_timer_sync(&br->ip4_own_query.timer);
        del_timer_sync(&br->ip6_other_query.timer);
        del_timer_sync(&br->ip6_own_query.timer);
  #endif
 +}
 +
 +void br_multicast_dev_del(struct net_bridge *br)
 +{
 +      struct net_bridge_mdb_htable *mdb;
 +      struct net_bridge_mdb_entry *mp;
 +      struct hlist_node *n;
 +      u32 ver;
 +      int i;
  
        spin_lock_bh(&br->multicast_lock);
        mdb = mlock_dereference(br->mdb, br);
@@@ -1854,10 -1834,8 +1854,10 @@@ int br_multicast_set_port_router(struc
                p->multicast_router = val;
                err = 0;
  
 -              if (val < 2 && !hlist_unhashed(&p->rlist))
 +              if (val < 2 && !hlist_unhashed(&p->rlist)) {
                        hlist_del_init_rcu(&p->rlist);
 +                      br_rtr_notify(br->dev, p, RTM_DELMDB);
 +              }
  
                if (val == 1)
                        break;
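
A recurring shape in the bridge hunks above: hlist_del_init_rcu() is paired with a !hlist_unhashed() guard, so a router port is unlinked, and RTM_DELMDB announced, at most once. A plain doubly-linked-list sketch of why delete-and-reinit makes "already gone" cheap to detect (this is not the kernel hlist/RCU API, just the idea):

#include <stdbool.h>
#include <stdio.h>

struct node { struct node *prev, *next; };

static void list_init(struct node *n) { n->prev = n->next = n; }
static bool list_unlinked(const struct node *n) { return n->next == n; }

static void list_add(struct node *head, struct node *n)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

static void list_del_init(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);	/* points at itself again, so deletion is detectable */
}

int main(void)
{
	struct node head, port;

	list_init(&head);
	list_init(&port);
	list_add(&head, &port);

	if (!list_unlinked(&port)) {	/* the !hlist_unhashed() guard */
		list_del_init(&port);
		puts("unlinked, notify RTM_DELMDB once");
	}
	if (list_unlinked(&port))
		puts("second pass: already gone, no duplicate notification");
	return 0;
}
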
diff --combined net/ipv4/fib_trie.c
index 1243c79cb5b0178052cae82e7a53728950f302e3,b0c6258ffb79a7cbcaaf1296e4842db52876b5b2..5154f81c53266841ae52913bda220dcf99244a8c
@@@ -1423,11 -1423,8 +1423,11 @@@ found
                            nh->nh_flags & RTNH_F_LINKDOWN &&
                            !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE))
                                continue;
 -                      if (flp->flowi4_oif && flp->flowi4_oif != nh->nh_oif)
 -                              continue;
 +                      if (!(flp->flowi4_flags & FLOWI_FLAG_VRFSRC)) {
 +                              if (flp->flowi4_oif &&
 +                                  flp->flowi4_oif != nh->nh_oif)
 +                                      continue;
 +                      }
  
                        if (!(fib_flags & FIB_LOOKUP_NOREF))
                                atomic_inc(&fi->fib_clntref);
@@@ -2468,7 -2465,7 +2468,7 @@@ static struct key_vector *fib_route_get
                key = l->key + 1;
                iter->pos++;
  
-               if (pos-- <= 0)
+               if (--pos <= 0)
                        break;
  
                l = NULL;
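
The fib_route_get_idx() hunk above turns pos-- into --pos: post-decrement tests the old value, so resuming a /proc seq_file walk at position pos stepped one route too far. A self-contained demonstration of the off-by-one:

#include <stdio.h>

static int steps_to_position(int pos, int post_decrement)
{
	int steps = 0;

	for (;;) {
		steps++;
		if (post_decrement ? (pos-- <= 0) : (--pos <= 0))
			break;
	}
	return steps;
}

int main(void)
{
	/* reaching seq position 1 should take exactly one step */
	printf("pos--: %d steps (one too many)\n", steps_to_position(1, 1));
	printf("--pos: %d steps (correct)\n", steps_to_position(1, 0));
	return 0;
}
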
diff --combined net/ipv6/ip6_fib.c
index 865e777ae20cbd7d8b17b94ee56bfe28de23c96e,548c6237b1e706f8ef72575a6a7dbad544b60fcc..418d9823692b6e78077d44c1ed8b15e998e2316b
@@@ -32,7 -32,6 +32,7 @@@
  #include <net/ipv6.h>
  #include <net/ndisc.h>
  #include <net/addrconf.h>
 +#include <net/lwtunnel.h>
  
  #include <net/ip6_fib.h>
  #include <net/ip6_route.h>
@@@ -173,6 -172,8 +173,8 @@@ static void rt6_free_pcpu(struct rt6_in
                        *ppcpu_rt = NULL;
                }
        }
+       non_pcpu_rt->rt6i_pcpu = NULL;
  }
  
  static void rt6_release(struct rt6_info *rt)
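
The rt6_free_pcpu() hunk above clears non_pcpu_rt->rt6i_pcpu once the per-CPU copies are gone, so a later teardown pass cannot walk freed memory. A userspace sketch of the same discipline, with a fixed array standing in for the kernel percpu allocator (names illustrative):

#include <stdio.h>
#include <stdlib.h>

#define NCPUS 4				/* illustrative, not nr_cpu_ids */

struct rt6_info { struct rt6_info **pcpu; };

static void rt6_free_pcpu(struct rt6_info *rt)
{
	int cpu;

	if (!rt->pcpu)
		return;			/* already torn down: no-op */

	for (cpu = 0; cpu < NCPUS; cpu++) {
		free(rt->pcpu[cpu]);
		rt->pcpu[cpu] = NULL;
	}
	free(rt->pcpu);
	rt->pcpu = NULL;		/* the line the hunk above adds */
}

int main(void)
{
	struct rt6_info rt = { .pcpu = calloc(NCPUS, sizeof(*rt.pcpu)) };

	rt.pcpu[0] = malloc(sizeof(struct rt6_info));
	rt6_free_pcpu(&rt);
	rt6_free_pcpu(&rt);		/* safe: second teardown is a no-op */
	puts("double teardown ok");
	return 0;
}
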
diff --combined net/ipv6/route.c
index 6c0fe4c7ce8d438a79e6da300caab55a7944e2c6,d15586490cecaedcc29bc821163cd6c85544b0b8..e476f01add87c62295a52fef641c806caca7d09a
  #include <net/tcp.h>
  #include <linux/rtnetlink.h>
  #include <net/dst.h>
 +#include <net/dst_metadata.h>
  #include <net/xfrm.h>
  #include <net/netevent.h>
  #include <net/netlink.h>
  #include <net/nexthop.h>
 +#include <net/lwtunnel.h>
 +#include <net/ip_tunnels.h>
  
  #include <asm/uaccess.h>
  
@@@ -321,8 -318,7 +321,7 @@@ static const struct rt6_info ip6_blk_ho
  /* allocate dst with ip6_dst_ops */
  static struct rt6_info *__ip6_dst_alloc(struct net *net,
                                        struct net_device *dev,
-                                       int flags,
-                                       struct fib6_table *table)
+                                       int flags)
  {
        struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
                                        0, DST_OBSOLETE_FORCE_CHK, flags);
  
  static struct rt6_info *ip6_dst_alloc(struct net *net,
                                      struct net_device *dev,
-                                     int flags,
-                                     struct fib6_table *table)
+                                     int flags)
  {
-       struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags, table);
+       struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);
  
        if (rt) {
                rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC);
@@@ -540,14 -535,13 +538,14 @@@ static void rt6_probe_deferred(struct w
                container_of(w, struct __rt6_probe_work, work);
  
        addrconf_addr_solict_mult(&work->target, &mcaddr);
 -      ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL);
 +      ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL, NULL);
        dev_put(work->dev);
        kfree(work);
  }
  
  static void rt6_probe(struct rt6_info *rt)
  {
 +      struct __rt6_probe_work *work;
        struct neighbour *neigh;
        /*
         * Okay, this does not seem to be appropriate
        rcu_read_lock_bh();
        neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
        if (neigh) {
 -              write_lock(&neigh->lock);
                if (neigh->nud_state & NUD_VALID)
                        goto out;
 -      }
 -
 -      if (!neigh ||
 -          time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
 -              struct __rt6_probe_work *work;
  
 +              work = NULL;
 +              write_lock(&neigh->lock);
 +              if (!(neigh->nud_state & NUD_VALID) &&
 +                  time_after(jiffies,
 +                             neigh->updated +
 +                             rt->rt6i_idev->cnf.rtr_probe_interval)) {
 +                      work = kmalloc(sizeof(*work), GFP_ATOMIC);
 +                      if (work)
 +                              __neigh_set_probe_once(neigh);
 +              }
 +              write_unlock(&neigh->lock);
 +      } else {
                work = kmalloc(sizeof(*work), GFP_ATOMIC);
 +      }
  
 -              if (neigh && work)
 -                      __neigh_set_probe_once(neigh);
 -
 -              if (neigh)
 -                      write_unlock(&neigh->lock);
 +      if (work) {
 +              INIT_WORK(&work->work, rt6_probe_deferred);
 +              work->target = rt->rt6i_gateway;
 +              dev_hold(rt->dst.dev);
 +              work->dev = rt->dst.dev;
 +              schedule_work(&work->work);
 +      }
  
 -              if (work) {
 -                      INIT_WORK(&work->work, rt6_probe_deferred);
 -                      work->target = rt->rt6i_gateway;
 -                      dev_hold(rt->dst.dev);
 -                      work->dev = rt->dst.dev;
 -                      schedule_work(&work->work);
 -              }
 -      } else {
  out:
 -              write_unlock(&neigh->lock);
 -      }
        rcu_read_unlock_bh();
  }
  #else
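
The rt6_probe() rewrite above narrows the neighbour write_lock to the decision itself: the work item is allocated under the lock only when the neighbour is invalid and a probe is due, and there is now a single scheduling path after the lock is dropped. A pthread-based sketch of that shape (hypothetical names, not the kernel locking or workqueue API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t neigh_lock = PTHREAD_MUTEX_INITIALIZER;
static bool nud_valid;			/* neigh->nud_state & NUD_VALID */
static bool probe_due = true;		/* the rtr_probe_interval test */

struct probe_work { int target; };

static void schedule_probe(struct probe_work *w)
{
	printf("probing target %d\n", w->target); /* like schedule_work() */
	free(w);
}

int main(void)
{
	struct probe_work *work = NULL;

	pthread_mutex_lock(&neigh_lock);
	if (!nud_valid && probe_due)
		work = malloc(sizeof(*work));	/* decide under the lock */
	pthread_mutex_unlock(&neigh_lock);

	if (work) {				/* one exit path, unlocked */
		work->target = 42;
		schedule_probe(work);
	}
	return 0;
}
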
@@@ -667,12 -662,6 +665,12 @@@ static struct rt6_info *find_match(stru
  {
        int m;
        bool match_do_rr = false;
 +      struct inet6_dev *idev = rt->rt6i_idev;
 +      struct net_device *dev = rt->dst.dev;
 +
 +      if (dev && !netif_carrier_ok(dev) &&
 +          idev->cnf.ignore_routes_with_linkdown)
 +              goto out;
  
        if (rt6_check_expired(rt))
                goto out;
@@@ -959,8 -948,7 +957,7 @@@ static struct rt6_info *ip6_rt_cache_al
        if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
                ort = (struct rt6_info *)ort->dst.from;
  
-       rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev,
-                            0, ort->rt6i_table);
+       rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev, 0);
  
        if (!rt)
                return NULL;
@@@ -992,8 -980,7 +989,7 @@@ static struct rt6_info *ip6_rt_pcpu_all
        struct rt6_info *pcpu_rt;
  
        pcpu_rt = __ip6_dst_alloc(dev_net(rt->dst.dev),
-                                 rt->dst.dev, rt->dst.flags,
-                                 rt->rt6i_table);
+                                 rt->dst.dev, rt->dst.flags);
  
        if (!pcpu_rt)
                return NULL;
  /* It should be called with read_lock_bh(&tb6_lock) acquired */
  static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt)
  {
-       struct rt6_info *pcpu_rt, *prev, **p;
+       struct rt6_info *pcpu_rt, **p;
  
        p = this_cpu_ptr(rt->rt6i_pcpu);
        pcpu_rt = *p;
  
-       if (pcpu_rt)
-               goto done;
+       if (pcpu_rt) {
+               dst_hold(&pcpu_rt->dst);
+               rt6_dst_from_metrics_check(pcpu_rt);
+       }
+       return pcpu_rt;
+ }
+ static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt)
+ {
+       struct fib6_table *table = rt->rt6i_table;
+       struct rt6_info *pcpu_rt, *prev, **p;
  
        pcpu_rt = ip6_rt_pcpu_alloc(rt);
        if (!pcpu_rt) {
                struct net *net = dev_net(rt->dst.dev);
  
-               pcpu_rt = net->ipv6.ip6_null_entry;
-               goto done;
+               dst_hold(&net->ipv6.ip6_null_entry->dst);
+               return net->ipv6.ip6_null_entry;
        }
  
-       prev = cmpxchg(p, NULL, pcpu_rt);
-       if (prev) {
-               /* If someone did it before us, return prev instead */
+       read_lock_bh(&table->tb6_lock);
+       if (rt->rt6i_pcpu) {
+               p = this_cpu_ptr(rt->rt6i_pcpu);
+               prev = cmpxchg(p, NULL, pcpu_rt);
+               if (prev) {
+                       /* If someone did it before us, return prev instead */
+                       dst_destroy(&pcpu_rt->dst);
+                       pcpu_rt = prev;
+               }
+       } else {
+               /* rt has been removed from the fib6 tree
+                * before we have a chance to acquire the read_lock.
+                * In this case, don't bother to create a pcpu rt
+                * since rt is going away anyway.  The next
+                * dst_check() will trigger a re-lookup.
+                */
                dst_destroy(&pcpu_rt->dst);
-               pcpu_rt = prev;
+               pcpu_rt = rt;
        }
- done:
        dst_hold(&pcpu_rt->dst);
        rt6_dst_from_metrics_check(pcpu_rt);
+       read_unlock_bh(&table->tb6_lock);
        return pcpu_rt;
  }
  
@@@ -1106,9 -1114,22 +1123,22 @@@ redo_rt6_select
                rt->dst.lastuse = jiffies;
                rt->dst.__use++;
                pcpu_rt = rt6_get_pcpu_route(rt);
-               read_unlock_bh(&table->tb6_lock);
+               if (pcpu_rt) {
+                       read_unlock_bh(&table->tb6_lock);
+               } else {
+                       /* We have to do the read_unlock first
+                        * because rt6_make_pcpu_route() may trigger
+                        * ip6_dst_gc() which will take the write_lock.
+                        */
+                       dst_hold(&rt->dst);
+                       read_unlock_bh(&table->tb6_lock);
+                       pcpu_rt = rt6_make_pcpu_route(rt);
+                       dst_release(&rt->dst);
+               }
  
                return pcpu_rt;
        }
  }
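
Note the ordering in the lookup hunk above: tb6_lock is released before rt6_make_pcpu_route() runs, with a dst reference held across the gap, because building the pcpu route can trigger ip6_dst_gc() and hence the write lock. A pthread rwlock sketch of that hand-off (the refcount and names are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t tb6_lock = PTHREAD_RWLOCK_INITIALIZER;
static int refcnt = 1;

static void make_pcpu_route(void)
{
	/* may need the write lock, e.g. via garbage collection */
	pthread_rwlock_wrlock(&tb6_lock);
	puts("built pcpu route under the write lock");
	pthread_rwlock_unlock(&tb6_lock);
}

int main(void)
{
	pthread_rwlock_rdlock(&tb6_lock);
	refcnt++;				/* dst_hold(): keep rt alive */
	pthread_rwlock_unlock(&tb6_lock);	/* unlock before upgrading */

	make_pcpu_route();			/* safe: no lock held here */

	refcnt--;				/* dst_release() */
	printf("refcnt=%d\n", refcnt);
	return 0;
}
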
  
@@@ -1133,7 -1154,6 +1163,7 @@@ void ip6_route_input(struct sk_buff *sk
        const struct ipv6hdr *iph = ipv6_hdr(skb);
        struct net *net = dev_net(skb->dev);
        int flags = RT6_LOOKUP_F_HAS_SADDR;
 +      struct ip_tunnel_info *tun_info;
        struct flowi6 fl6 = {
                .flowi6_iif = skb->dev->ifindex,
                .daddr = iph->daddr,
                .flowi6_proto = iph->nexthdr,
        };
  
 +      tun_info = skb_tunnel_info(skb);
 +      if (tun_info && tun_info->mode == IP_TUNNEL_INFO_RX)
 +              fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
 +      skb_dst_drop(skb);
        skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
  }
  
@@@ -1569,7 -1585,7 +1599,7 @@@ struct dst_entry *icmp6_dst_alloc(struc
        if (unlikely(!idev))
                return ERR_PTR(-ENODEV);
  
-       rt = ip6_dst_alloc(net, dev, 0, NULL);
+       rt = ip6_dst_alloc(net, dev, 0);
        if (unlikely(!rt)) {
                in6_dev_put(idev);
                dst = ERR_PTR(-ENOMEM);
@@@ -1756,7 -1772,8 +1786,8 @@@ int ip6_route_add(struct fib6_config *c
        if (!table)
                goto out;
  
-       rt = ip6_dst_alloc(net, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT, table);
+       rt = ip6_dst_alloc(net, NULL,
+                          (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);
  
        if (!rt) {
                err = -ENOMEM;
  
        rt->dst.output = ip6_output;
  
 +      if (cfg->fc_encap) {
 +              struct lwtunnel_state *lwtstate;
 +
 +              err = lwtunnel_build_state(dev, cfg->fc_encap_type,
 +                                         cfg->fc_encap, &lwtstate);
 +              if (err)
 +                      goto out;
 +              rt->dst.lwtstate = lwtstate_get(lwtstate);
 +              if (lwtunnel_output_redirect(rt->dst.lwtstate)) {
 +                      rt->dst.lwtstate->orig_output = rt->dst.output;
 +                      rt->dst.output = lwtunnel_output;
 +              }
 +              if (lwtunnel_input_redirect(rt->dst.lwtstate)) {
 +                      rt->dst.lwtstate->orig_input = rt->dst.input;
 +                      rt->dst.input = lwtunnel_input;
 +              }
 +      }
 +
        ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
        rt->rt6i_dst.plen = cfg->fc_dst_len;
        if (rt->rt6i_dst.plen == 128)
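
The fc_encap branch above saves the route's original output/input handlers into the lwtunnel state and splices the tunnel handlers in front of them, so encapsulation happens before the normal transmit path. A function-pointer sketch of the redirect (stub names illustrative, not the lwtunnel API):

#include <stdio.h>

struct dst {
	int (*output)(struct dst *);
	int (*orig_output)(struct dst *);
};

static int ip6_output_stub(struct dst *d)
{
	(void)d;
	puts("plain ip6_output");
	return 0;
}

static int lwtunnel_output_stub(struct dst *d)
{
	puts("add encap header, then hand off:");
	return d->orig_output(d);	/* fall through to the saved handler */
}

int main(void)
{
	struct dst d = { .output = ip6_output_stub };

	/* what the fc_encap branch wires up for output redirect */
	d.orig_output = d.output;
	d.output = lwtunnel_output_stub;

	return d.output(&d);
}
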
@@@ -2181,7 -2180,6 +2212,7 @@@ static void ip6_rt_copy_init(struct rt6
  #endif
        rt->rt6i_prefsrc = ort->rt6i_prefsrc;
        rt->rt6i_table = ort->rt6i_table;
 +      rt->dst.lwtstate = lwtstate_get(ort->dst.lwtstate);
  }
  
  #ifdef CONFIG_IPV6_ROUTE_INFO
@@@ -2432,7 -2430,7 +2463,7 @@@ struct rt6_info *addrconf_dst_alloc(str
  {
        struct net *net = dev_net(idev->dev);
        struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev,
-                                           DST_NOCOUNT, NULL);
+                                           DST_NOCOUNT);
        if (!rt)
                return ERR_PTR(-ENOMEM);
  
@@@ -2630,8 -2628,6 +2661,8 @@@ static const struct nla_policy rtm_ipv6
        [RTA_METRICS]           = { .type = NLA_NESTED },
        [RTA_MULTIPATH]         = { .len = sizeof(struct rtnexthop) },
        [RTA_PREF]              = { .type = NLA_U8 },
 +      [RTA_ENCAP_TYPE]        = { .type = NLA_U16 },
 +      [RTA_ENCAP]             = { .type = NLA_NESTED },
  };
  
  static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
                cfg->fc_flags |= RTF_PREF(pref);
        }
  
 +      if (tb[RTA_ENCAP])
 +              cfg->fc_encap = tb[RTA_ENCAP];
 +
 +      if (tb[RTA_ENCAP_TYPE])
 +              cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
 +
        err = 0;
  errout:
        return err;
@@@ -2764,10 -2754,6 +2795,10 @@@ beginning
                                r_cfg.fc_gateway = nla_get_in6_addr(nla);
                                r_cfg.fc_flags |= RTF_GATEWAY;
                        }
 +                      r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
 +                      nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
 +                      if (nla)
 +                              r_cfg.fc_encap_type = nla_get_u16(nla);
                }
                err = add ? ip6_route_add(&r_cfg) : ip6_route_del(&r_cfg);
                if (err) {
@@@ -2830,7 -2816,7 +2861,7 @@@ static int inet6_rtm_newroute(struct sk
                return ip6_route_add(&cfg);
  }
  
 -static inline size_t rt6_nlmsg_size(void)
 +static inline size_t rt6_nlmsg_size(struct rt6_info *rt)
  {
        return NLMSG_ALIGN(sizeof(struct rtmsg))
               + nla_total_size(16) /* RTA_SRC */
               + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
               + nla_total_size(sizeof(struct rta_cacheinfo))
               + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
 -             + nla_total_size(1); /* RTA_PREF */
 +             + nla_total_size(1) /* RTA_PREF */
 +             + lwtunnel_get_encap_size(rt->dst.lwtstate);
  }
  
  static int rt6_fill_node(struct net *net,
        else
                rtm->rtm_type = RTN_UNICAST;
        rtm->rtm_flags = 0;
 +      if (!netif_carrier_ok(rt->dst.dev)) {
 +              rtm->rtm_flags |= RTNH_F_LINKDOWN;
 +              if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown)
 +                      rtm->rtm_flags |= RTNH_F_DEAD;
 +      }
        rtm->rtm_scope = RT_SCOPE_UNIVERSE;
        rtm->rtm_protocol = rt->rt6i_protocol;
        if (rt->rt6i_flags & RTF_DYNAMIC)
        if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
                goto nla_put_failure;
  
 +      lwtunnel_fill_encap(skb, rt->dst.lwtstate);
 +
        nlmsg_end(skb, nlh);
        return 0;
  
@@@ -3126,7 -3104,7 +3157,7 @@@ void inet6_rt_notify(int event, struct 
        err = -ENOBUFS;
        seq = info->nlh ? info->nlh->nlmsg_seq : 0;
  
 -      skb = nlmsg_new(rt6_nlmsg_size(), gfp_any());
 +      skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
        if (!skb)
                goto errout;
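
Finally, rt6_nlmsg_size() now takes the route, so the notification skb allocated in inet6_rt_notify() is sized per route, with lwtunnel_get_encap_size() added for routes carrying encap attributes. A sketch of the sizing idea (the byte counts are illustrative, not the real netlink attribute layout):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct rt { size_t encap_size; };	/* 0 when the route has no encap */

static size_t rt_nlmsg_size(const struct rt *rt)
{
	const size_t fixed = 128;	/* header + always-present attrs */

	return fixed + rt->encap_size;	/* + lwtunnel_get_encap_size() */
}

int main(void)
{
	struct rt plain = { .encap_size = 0 };
	struct rt tun = { .encap_size = 24 };
	char *msg = malloc(rt_nlmsg_size(&tun));

	if (!msg)
		return 1;
	memset(msg, 0, rt_nlmsg_size(&tun));
	printf("plain=%zu tunneled=%zu\n",
	       rt_nlmsg_size(&plain), rt_nlmsg_size(&tun));
	free(msg);
	return 0;
}
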