Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author    David S. Miller <davem@davemloft.net>
          Mon, 27 Feb 2012 02:55:51 +0000 (21:55 -0500)
committer David S. Miller <davem@davemloft.net>
          Mon, 27 Feb 2012 02:55:51 +0000 (21:55 -0500)
Conflicts:
drivers/net/ethernet/sfc/rx.c

There were overlapping changes in drivers/net/ethernet/sfc/rx.c: one
converted the rx_buf->is_page boolean into a set of u16 flags, and the
other adjusted how ->ip_summed is initialized.

Signed-off-by: David S. Miller <davem@davemloft.net>
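
A minimal user-space sketch of how the two sides compose after
resolution: the u16 flags word (from net-next) replaces the old
is_page/checksummed booleans, while the ->ip_summed initialization
(from net) now happens on the delivery paths selected by those flags.
The flag values and the rx_dispatch() helper below are illustrative
only, not the driver's actual definitions.

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder flag bits; the real definitions live in the sfc
     * driver's headers and may use different values. */
    #define EFX_RX_BUF_PAGE     0x0001u  /* rx buffer is page-backed */
    #define EFX_RX_PKT_CSUMMED  0x0002u  /* hw validated the checksum */

    /* Models the resolved dispatch in __efx_rx_packet(): page buffers
     * and checksummed skbs go through GRO, which derives ->ip_summed
     * from the flags; everything else takes efx_rx_deliver(), which
     * leaves the skb with no checksum claimed. */
    static const char *rx_dispatch(uint16_t flags, int rxcsum_enabled)
    {
            if (!rxcsum_enabled)
                    flags &= ~EFX_RX_PKT_CSUMMED;
            if (flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED))
                    return "efx_rx_packet_gro"; /* UNNECESSARY or NONE */
            return "efx_rx_deliver"; /* skb_checksum_none_assert() */
    }

    int main(void)
    {
            printf("%s\n", rx_dispatch(EFX_RX_PKT_CSUMMED, 1)); /* gro */
            printf("%s\n", rx_dispatch(0, 1));                  /* plain */
            return 0;
    }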
22 files changed:
MAINTAINERS
drivers/net/can/sja1000/sja1000.c
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
drivers/net/ethernet/broadcom/b44.c
drivers/net/ethernet/broadcom/cnic.c
drivers/net/ethernet/cisco/enic/enic_pp.c
drivers/net/ethernet/jme.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/micrel/ks8851_mll.c
drivers/net/ethernet/sfc/rx.c
drivers/net/ethernet/ti/davinci_emac.c
drivers/net/ppp/ppp_generic.c
drivers/net/vmxnet3/vmxnet3_drv.c
include/linux/skbuff.h
net/atm/clip.c
net/core/neighbour.c
net/core/rtnetlink.c
net/ipv4/ip_gre.c
net/ipv4/ping.c
net/ipv6/ndisc.c
net/netfilter/nf_conntrack_netlink.c

diff --combined MAINTAINERS
index ed48d55662c1ffe6ddd6415ff26aee59de691c5b,75a9a5fc230acde24280a584bc7f8af994b677c7..3b5fe0c09c2389362eda5ef7c8ad833376000a38
@@@ -269,7 -269,6 +269,6 @@@ S: Orpha
  F:    drivers/platform/x86/wmi.c
  
  AD1889 ALSA SOUND DRIVER
- M:    Kyle McMartin <kyle@mcmartin.ca>
  M:    Thibaut Varene <T-Bone@parisc-linux.org>
  W:    http://wiki.parisc-linux.org/AD1889
  L:    linux-parisc@vger.kernel.org
@@@ -1406,7 -1405,7 +1405,7 @@@ F:      net/ax25
  B43 WIRELESS DRIVER
  M:    Stefano Brivio <stefano.brivio@polimi.it>
  L:    linux-wireless@vger.kernel.org
 -L:    b43-dev@lists.infradead.org (moderated for non-subscribers)
 +L:    b43-dev@lists.infradead.org
  W:    http://linuxwireless.org/en/users/Drivers/b43
  S:    Maintained
  F:    drivers/net/wireless/b43/
@@@ -1415,7 -1414,6 +1414,7 @@@ B43LEGACY WIRELESS DRIVE
  M:    Larry Finger <Larry.Finger@lwfinger.net>
  M:    Stefano Brivio <stefano.brivio@polimi.it>
  L:    linux-wireless@vger.kernel.org
 +L:    b43-dev@lists.infradead.org
  W:    http://linuxwireless.org/en/users/Drivers/b43
  S:    Maintained
  F:    drivers/net/wireless/b43legacy/
@@@ -1799,8 -1797,7 +1798,8 @@@ F:      Documentation/zh_CN
  CISCO VIC ETHERNET NIC DRIVER
  M:    Christian Benvenuti <benve@cisco.com>
  M:    Roopa Prabhu <roprabhu@cisco.com>
 -M:    David Wang <dwang2@cisco.com>
 +M:    Neel Patel <neepatel@cisco.com>
 +M:    Nishank Trivedi <nistrive@cisco.com>
  S:    Supported
  F:    drivers/net/ethernet/cisco/enic/
  
@@@ -3049,7 -3046,6 +3048,6 @@@ F:      drivers/hwspinlock/hwspinlock_
  F:    include/linux/hwspinlock.h
  
  HARMONY SOUND DRIVER
- M:    Kyle McMartin <kyle@mcmartin.ca>
  L:    linux-parisc@vger.kernel.org
  S:    Maintained
  F:    sound/parisc/harmony.*
@@@ -4914,6 -4910,8 +4912,6 @@@ F:      fs/ocfs2
  
  ORINOCO DRIVER
  L:    linux-wireless@vger.kernel.org
 -L:    orinoco-users@lists.sourceforge.net
 -L:    orinoco-devel@lists.sourceforge.net
  W:    http://linuxwireless.org/en/users/Drivers/orinoco
  W:    http://www.nongnu.org/orinoco/
  S:    Orphan
@@@ -5000,9 -4998,8 +4998,8 @@@ F:      Documentation/blockdev/paride.tx
  F:    drivers/block/paride/
  
  PARISC ARCHITECTURE
- M:    Kyle McMartin <kyle@mcmartin.ca>
- M:    Helge Deller <deller@gmx.de>
  M:    "James E.J. Bottomley" <jejb@parisc-linux.org>
+ M:    Helge Deller <deller@gmx.de>
  L:    linux-parisc@vger.kernel.org
  W:    http://www.parisc-linux.org/
  Q:    http://patchwork.kernel.org/project/linux-parisc/list/
@@@ -5861,7 -5858,7 +5858,7 @@@ S:      Maintaine
  F:    drivers/mmc/host/sdhci-spear.c
  
  SECURITY SUBSYSTEM
- M:    James Morris <jmorris@namei.org>
+ M:    James Morris <james.l.morris@oracle.com>
  L:    linux-security-module@vger.kernel.org (suggested Cc:)
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/linux-security.git
  W:    http://security.wiki.kernel.org/
@@@ -5874,7 -5871,7 +5871,7 @@@ S:      Supporte
  
  SELINUX SECURITY MODULE
  M:    Stephen Smalley <sds@tycho.nsa.gov>
- M:    James Morris <jmorris@namei.org>
+ M:    James Morris <james.l.morris@oracle.com>
  M:    Eric Paris <eparis@parisplace.org>
  L:    selinux@tycho.nsa.gov (subscribers-only, general discussion)
  W:    http://selinuxproject.org
@@@ -7461,12 -7458,6 +7458,12 @@@ S:    Supporte
  F:    Documentation/filesystems/xfs.txt
  F:    fs/xfs/
  
 +XILINX AXI ETHERNET DRIVER
 +M:    Ariane Keller <ariane.keller@tik.ee.ethz.ch>
 +M:    Daniel Borkmann <daniel.borkmann@tik.ee.ethz.ch>
 +S:    Maintained
 +F:    drivers/net/ethernet/xilinx/xilinx_axienet*
 +
  XILINX SYSTEMACE DRIVER
  M:    Grant Likely <grant.likely@secretlab.ca>
  W:    http://www.secretlab.ca/
diff --combined drivers/net/can/sja1000/sja1000.c
index ebbcfcafe29b7c177a01f59651c6a7803bef42e8,192b0d118df46d4136c5bd5bab872f35d51bcbc7..5e10472371eda2db2764e5976b55e58d9f70f044
@@@ -95,11 -95,16 +95,16 @@@ static void sja1000_write_cmdreg(struc
        spin_unlock_irqrestore(&priv->cmdreg_lock, flags);
  }
  
+ static int sja1000_is_absent(struct sja1000_priv *priv)
+ {
+       return (priv->read_reg(priv, REG_MOD) == 0xFF);
+ }
  static int sja1000_probe_chip(struct net_device *dev)
  {
        struct sja1000_priv *priv = netdev_priv(dev);
  
-       if (priv->reg_base && (priv->read_reg(priv, 0) == 0xFF)) {
+       if (priv->reg_base && sja1000_is_absent(priv)) {
                printk(KERN_INFO "%s: probing @0x%lX failed\n",
                       DRV_NAME, dev->base_addr);
                return 0;
@@@ -128,7 -133,7 +133,7 @@@ static void set_reset_mode(struct net_d
                status = priv->read_reg(priv, REG_MOD);
        }
  
 -      dev_err(dev->dev.parent, "setting SJA1000 into reset mode failed!\n");
 +      netdev_err(dev, "setting SJA1000 into reset mode failed!\n");
  }
  
  static void set_normal_mode(struct net_device *dev)
                status = priv->read_reg(priv, REG_MOD);
        }
  
 -      dev_err(dev->dev.parent, "setting SJA1000 into normal mode failed!\n");
 +      netdev_err(dev, "setting SJA1000 into normal mode failed!\n");
  }
  
  static void sja1000_start(struct net_device *dev)
@@@ -209,7 -214,8 +214,7 @@@ static int sja1000_set_bittiming(struc
        if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
                btr1 |= 0x80;
  
 -      dev_info(dev->dev.parent,
 -               "setting BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1);
 +      netdev_info(dev, "setting BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1);
  
        priv->write_reg(priv, REG_BTR0, btr0);
        priv->write_reg(priv, REG_BTR1, btr1);
@@@ -377,7 -383,7 +382,7 @@@ static int sja1000_err(struct net_devic
  
        if (isrc & IRQ_DOI) {
                /* data overrun interrupt */
 -              dev_dbg(dev->dev.parent, "data overrun interrupt\n");
 +              netdev_dbg(dev, "data overrun interrupt\n");
                cf->can_id |= CAN_ERR_CRTL;
                cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
                stats->rx_over_errors++;
  
        if (isrc & IRQ_EI) {
                /* error warning interrupt */
 -              dev_dbg(dev->dev.parent, "error warning interrupt\n");
 +              netdev_dbg(dev, "error warning interrupt\n");
  
                if (status & SR_BS) {
                        state = CAN_STATE_BUS_OFF;
        }
        if (isrc & IRQ_EPI) {
                /* error passive interrupt */
 -              dev_dbg(dev->dev.parent, "error passive interrupt\n");
 +              netdev_dbg(dev, "error passive interrupt\n");
                if (status & SR_ES)
                        state = CAN_STATE_ERROR_PASSIVE;
                else
        }
        if (isrc & IRQ_ALI) {
                /* arbitration lost interrupt */
 -              dev_dbg(dev->dev.parent, "arbitration lost interrupt\n");
 +              netdev_dbg(dev, "arbitration lost interrupt\n");
                alc = priv->read_reg(priv, REG_ALC);
                priv->can.can_stats.arbitration_lost++;
                stats->tx_errors++;
@@@ -492,9 -498,12 +497,12 @@@ irqreturn_t sja1000_interrupt(int irq, 
        while ((isrc = priv->read_reg(priv, REG_IR)) && (n < SJA1000_MAX_IRQ)) {
                n++;
                status = priv->read_reg(priv, REG_SR);
+               /* check for absent controller due to hw unplug */
+               if (status == 0xFF && sja1000_is_absent(priv))
+                       return IRQ_NONE;
  
                if (isrc & IRQ_WUI)
 -                      dev_warn(dev->dev.parent, "wakeup interrupt\n");
 +                      netdev_warn(dev, "wakeup interrupt\n");
  
                if (isrc & IRQ_TI) {
                        /* transmission complete interrupt */
                        while (status & SR_RBS) {
                                sja1000_rx(dev);
                                status = priv->read_reg(priv, REG_SR);
+                               /* check for absent controller */
+                               if (status == 0xFF && sja1000_is_absent(priv))
+                                       return IRQ_NONE;
                        }
                }
                if (isrc & (IRQ_DOI | IRQ_EI | IRQ_BEI | IRQ_EPI | IRQ_ALI)) {
                priv->post_irq(priv);
  
        if (n >= SJA1000_MAX_IRQ)
 -              dev_dbg(dev->dev.parent, "%d messages handled in ISR", n);
 +              netdev_dbg(dev, "%d messages handled in ISR", n);
  
        return (n) ? IRQ_HANDLED : IRQ_NONE;
  }
diff --combined drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index ccdf1f6e2f9f0c5979c367fcd0736165b9b2cf89,1ff3c6df35a21a5f03b66b8316839466a3655e31..0f21a9b4cdd41f3c44c336b743cdacbb112389d9
@@@ -468,7 -468,6 +468,7 @@@ static int atl1c_set_mac_addr(struct ne
  
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
 +      netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
  
        atl1c_hw_set_mac_addr(&adapter->hw);
  
@@@ -1766,7 -1765,7 +1766,7 @@@ static int atl1c_alloc_rx_buffer(struc
        while (next_info->flags & ATL1C_BUFFER_FREE) {
                rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use);
  
 -              skb = dev_alloc_skb(adapter->rx_buffer_len);
 +              skb = netdev_alloc_skb(adapter->netdev, adapter->rx_buffer_len);
                if (unlikely(!skb)) {
                        if (netif_msg_rx_err(adapter))
                                dev_warn(&pdev->dev, "alloc rx buffer failed\n");
@@@ -2245,10 -2244,6 +2245,6 @@@ static netdev_tx_t atl1c_xmit_frame(str
                        dev_info(&adapter->pdev->dev, "tx locked\n");
                return NETDEV_TX_LOCKED;
        }
-       if (skb->mark == 0x01)
-               type = atl1c_trans_high;
-       else
-               type = atl1c_trans_normal;
  
        if (atl1c_tpd_avail(adapter, type) < tpd_req) {
                /* no enough descriptor, just stop queue */
@@@ -2690,6 -2685,7 +2686,6 @@@ static int __devinit atl1c_probe(struc
        netdev = alloc_etherdev(sizeof(struct atl1c_adapter));
        if (netdev == NULL) {
                err = -ENOMEM;
 -              dev_err(&pdev->dev, "etherdev alloc failed\n");
                goto err_alloc_etherdev;
        }
  
                err = -EIO;
                goto err_reset;
        }
 -      if (atl1c_read_mac_addr(&adapter->hw) != 0) {
 -              err = -EIO;
 -              dev_err(&pdev->dev, "get mac address failed\n");
 -              goto err_eeprom;
 +      if (atl1c_read_mac_addr(&adapter->hw)) {
 +              /* got a random MAC address, set NET_ADDR_RANDOM to netdev */
 +              netdev->addr_assign_type |= NET_ADDR_RANDOM;
        }
        memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
        memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
  err_reset:
  err_register:
  err_sw_init:
 -err_eeprom:
        iounmap(adapter->hw.hw_addr);
  err_init_netdev:
  err_ioremap:
diff --combined drivers/net/ethernet/broadcom/b44.c
index 66f53c797e3a470279eb01e6a31c6d37101fa9fc,cab87456a34a33a54a328d48fefd9086eb936b6d..46b8b7d81633eaa95fb4755a2225494a8b93856a
@@@ -2138,6 -2138,7 +2138,6 @@@ static int __devinit b44_init_one(struc
  
        dev = alloc_etherdev(sizeof(*bp));
        if (!dev) {
 -              dev_err(sdev->dev, "Etherdev alloc failed, aborting\n");
                err = -ENOMEM;
                goto out;
        }
@@@ -2338,7 -2339,7 +2338,7 @@@ static inline int __init b44_pci_init(v
        return err;
  }
  
- static inline void __exit b44_pci_exit(void)
+ static inline void b44_pci_exit(void)
  {
  #ifdef CONFIG_B44_PCI
        ssb_pcihost_unregister(&b44_pci_driver);
diff --combined drivers/net/ethernet/broadcom/cnic.c
index df429959abdd9f8c2cbb64937ba23fcc45b84c32,818a573669e6b0b4cdd27fda3ca111ff6c133931..7b65716b8734451356d9f49584db8bdb55615776
@@@ -1,6 -1,6 +1,6 @@@
  /* cnic.c: Broadcom CNIC core network driver.
   *
 - * Copyright (c) 2006-2011 Broadcom Corporation
 + * Copyright (c) 2006-2012 Broadcom Corporation
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License as published by
@@@ -380,8 -380,6 +380,8 @@@ static int cnic_iscsi_nl_msg_recv(struc
                if (cnic_in_use(csk) &&
                    test_bit(SK_F_CONNECT_START, &csk->flags)) {
  
 +                      csk->vlan_id = path_resp->vlan_id;
 +
                        memcpy(csk->ha, path_resp->mac_addr, 6);
                        if (test_bit(SK_F_IPV6, &csk->flags))
                                memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
@@@ -2523,35 -2521,12 +2523,35 @@@ static void cnic_bnx2x_kwqe_err(struct 
        u32 cid;
        u32 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
        u32 layer_code = kwqe->kwqe_op_flag & KWQE_LAYER_MASK;
 +      u32 kcqe_op;
        int ulp_type;
  
        cid = kwqe->kwqe_info0;
        memset(&kcqe, 0, sizeof(kcqe));
  
 -      if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_ISCSI) {
 +      if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_FCOE) {
 +              u32 l5_cid = 0;
 +
 +              ulp_type = CNIC_ULP_FCOE;
 +              if (opcode == FCOE_KWQE_OPCODE_DISABLE_CONN) {
 +                      struct fcoe_kwqe_conn_enable_disable *req;
 +
 +                      req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
 +                      kcqe_op = FCOE_KCQE_OPCODE_DISABLE_CONN;
 +                      cid = req->context_id;
 +                      l5_cid = req->conn_id;
 +              } else if (opcode == FCOE_KWQE_OPCODE_DESTROY) {
 +                      kcqe_op = FCOE_KCQE_OPCODE_DESTROY_FUNC;
 +              } else {
 +                      return;
 +              }
 +              kcqe.kcqe_op_flag = kcqe_op << KCQE_FLAGS_OPCODE_SHIFT;
 +              kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_FCOE;
 +              kcqe.kcqe_info1 = FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR;
 +              kcqe.kcqe_info2 = cid;
 +              kcqe.kcqe_info0 = l5_cid;
 +
 +      } else if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_ISCSI) {
                ulp_type = CNIC_ULP_ISCSI;
                if (opcode == ISCSI_KWQE_OPCODE_UPDATE_CONN)
                        cid = kwqe->kwqe_info1;
  
        } else if (layer_code == KWQE_FLAGS_LAYER_MASK_L4) {
                struct l4_kcq *l4kcqe = (struct l4_kcq *) &kcqe;
 -              u32 kcqe_op;
  
                ulp_type = CNIC_ULP_L4;
                if (opcode == L4_KWQE_OPCODE_VALUE_CONNECT1)
@@@ -2710,17 -2686,9 +2710,17 @@@ static int cnic_submit_bnx2x_fcoe_kwqes
                                   opcode);
                        break;
                }
 -              if (ret < 0)
 +              if (ret < 0) {
                        netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
                                   opcode);
 +
 +                      /* Possibly bnx2x parity error, send completion
 +                       * to ulp drivers with error code to speed up
 +                       * cleanup and reset recovery.
 +                       */
 +                      if (ret == -EIO || ret == -EAGAIN)
 +                              cnic_bnx2x_kwqe_err(dev, kwqe);
 +              }
                i += work;
        }
        return 0;
@@@ -3616,7 -3584,11 +3616,11 @@@ static int cnic_get_v6_route(struct soc
                fl6.flowi6_oif = dst_addr->sin6_scope_id;
  
        *dst = ip6_route_output(&init_net, NULL, &fl6);
-       if (*dst)
+       if ((*dst)->error) {
+               dst_release(*dst);
+               *dst = NULL;
+               return -ENETUNREACH;
+       } else
                return 0;
  #endif
  
@@@ -3929,8 -3901,6 +3933,8 @@@ static void cnic_cm_process_kcqe(struc
        case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
                if (l4kcqe->status == 0)
                        set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
 +              else if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_NIC_ERROR)
 +                      set_bit(SK_F_HW_ERR, &csk->flags);
  
                smp_mb__before_clear_bit();
                clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
diff --combined drivers/net/ethernet/cisco/enic/enic_pp.c
index c758674643ce28bf236bd46c63724c259bcdcafe,c347b6236f8fb90f0169e70e293fb2b8ce0ccc39..dafea1ecb7b1cf03db3f26182f6a8b199fa3c338
@@@ -72,7 -72,7 +72,7 @@@ static int enic_set_port_profile(struc
        struct enic_port_profile *pp;
        struct vic_provinfo *vp;
        const u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
-       const u16 os_type = htons(VIC_GENERIC_PROV_OS_TYPE_LINUX);
+       const __be16 os_type = htons(VIC_GENERIC_PROV_OS_TYPE_LINUX);
        char uuid_str[38];
        char client_mac_str[18];
        u8 *client_mac;
@@@ -207,7 -207,7 +207,7 @@@ static int enic_pp_disassociate(struct 
        if (!is_zero_ether_addr(pp->mac_addr))
                ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_del_addr,
                        pp->mac_addr);
 -      else if (!is_zero_ether_addr(netdev->dev_addr))
 +      else if (vf == PORT_SELF_VF && !is_zero_ether_addr(netdev->dev_addr))
                ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_del_addr,
                        netdev->dev_addr);
  
@@@ -294,7 -294,7 +294,7 @@@ static int enic_pp_associate(struct eni
        if (!is_zero_ether_addr(pp->mac_addr))
                ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_add_addr,
                        pp->mac_addr);
 -      else if (!is_zero_ether_addr(netdev->dev_addr))
 +      else if (vf == PORT_SELF_VF && !is_zero_ether_addr(netdev->dev_addr))
                ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_add_addr,
                        netdev->dev_addr);
  
diff --combined drivers/net/ethernet/jme.c
index 1b86d0b45f3c2c436ef89051283faa394fd0bd8e,55cbf65512c3bafb4780f6166e6d05ba5f314603..4ea6580d3ae8a35a3150d8e1e99e39d44bfd407b
@@@ -2328,19 -2328,11 +2328,11 @@@ jme_change_mtu(struct net_device *netde
                ((new_mtu) < IPV6_MIN_MTU))
                return -EINVAL;
  
-       if (new_mtu > 4000) {
-               jme->reg_rxcs &= ~RXCS_FIFOTHNP;
-               jme->reg_rxcs |= RXCS_FIFOTHNP_64QW;
-               jme_restart_rx_engine(jme);
-       } else {
-               jme->reg_rxcs &= ~RXCS_FIFOTHNP;
-               jme->reg_rxcs |= RXCS_FIFOTHNP_128QW;
-               jme_restart_rx_engine(jme);
-       }
  
        netdev->mtu = new_mtu;
        netdev_update_features(netdev);
  
+       jme_restart_rx_engine(jme);
        jme_reset_link(jme);
  
        return 0;
@@@ -2999,6 -2991,7 +2991,6 @@@ jme_init_one(struct pci_dev *pdev
         */
        netdev = alloc_etherdev(sizeof(*jme));
        if (!netdev) {
 -              pr_err("Cannot allocate netdev structure\n");
                rc = -ENOMEM;
                goto err_out_release_regions;
        }
diff --combined drivers/net/ethernet/mellanox/mlx4/main.c
index 0809b7c3905c8263448f24402f1580bf12f66080,d498f049c74ecfa6578f02f1257e1063291bbe0d..5f15014713bc7e213cb024cc3bb9680c6fb04be1
@@@ -531,15 -531,14 +531,14 @@@ int mlx4_change_port_types(struct mlx4_
        for (port = 0; port <  dev->caps.num_ports; port++) {
                /* Change the port type only if the new type is different
                 * from the current, and not set to Auto */
-               if (port_types[port] != dev->caps.port_type[port + 1]) {
+               if (port_types[port] != dev->caps.port_type[port + 1])
                        change = 1;
-                       dev->caps.port_type[port + 1] = port_types[port];
-               }
        }
        if (change) {
                mlx4_unregister_device(dev);
                for (port = 1; port <= dev->caps.num_ports; port++) {
                        mlx4_CLOSE_PORT(dev, port);
+                       dev->caps.port_type[port] = port_types[port - 1];
                        err = mlx4_SET_PORT(dev, port);
                        if (err) {
                                mlx4_err(dev, "Failed to set port %d, "
@@@ -986,6 -985,9 +985,9 @@@ static int map_bf_area(struct mlx4_dev 
        resource_size_t bf_len;
        int err = 0;
  
+       if (!dev->caps.bf_reg_size)
+               return -ENXIO;
        bf_start = pci_resource_start(dev->pdev, 2) +
                        (dev->caps.num_uars << PAGE_SHIFT);
        bf_len = pci_resource_len(dev->pdev, 2) -
@@@ -1543,11 -1545,13 +1545,11 @@@ static int mlx4_init_steering(struct ml
        if (!priv->steer)
                return -ENOMEM;
  
 -      for (i = 0; i < num_entries; i++) {
 +      for (i = 0; i < num_entries; i++)
                for (j = 0; j < MLX4_NUM_STEERS; j++) {
                        INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
                        INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
                }
 -              INIT_LIST_HEAD(&priv->steer[i].high_prios);
 -      }
        return 0;
  }
  
@@@ -1823,7 -1827,7 +1825,7 @@@ slave_start
                goto err_master_mfunc;
  
        priv->msix_ctl.pool_bm = 0;
-       spin_lock_init(&priv->msix_ctl.pool_lock);
+       mutex_init(&priv->msix_ctl.pool_lock);
  
        mlx4_enable_msi_x(dev);
        if ((mlx4_is_mfunc(dev)) &&
diff --combined drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 276c7f72bd83360d696039f33478c42c919367d5,28f8251561f4a24d67697f538d4e25834aeaa5d6..5da51b99dbb862afe13ef16be936b350815f9986
@@@ -697,12 -697,13 +697,12 @@@ struct mlx4_sense 
  
  struct mlx4_msix_ctl {
        u64             pool_bm;
-       spinlock_t      pool_lock;
+       struct mutex    pool_lock;
  };
  
  struct mlx4_steer {
        struct list_head promisc_qps[MLX4_NUM_STEERS];
        struct list_head steer_entries[MLX4_NUM_STEERS];
 -      struct list_head high_prios;
  };
  
  struct mlx4_priv {
diff --combined drivers/net/ethernet/micrel/ks8851_mll.c
index 10d57983df05729cd8105db649a40ed800ffab50,2784bc706f1e2cd4cca9ed6289f09105ece35bf3..b8104d9f40810871525eb360edaf16e483f6e945
@@@ -1,5 -1,5 +1,5 @@@
  /**
 - * drivers/net/ks8851_mll.c
 + * drivers/net/ethernet/micrel/ks8851_mll.c
   * Copyright (c) 2009 Micrel Inc.
   *
   * This program is free software; you can redistribute it and/or modify
@@@ -794,7 -794,7 +794,7 @@@ static void ks_rcv(struct ks_net *ks, s
  
        frame_hdr = ks->frame_head_info;
        while (ks->frame_cnt--) {
 -              skb = dev_alloc_skb(frame_hdr->len + 16);
 +              skb = netdev_alloc_skb(netdev, frame_hdr->len + 16);
                if (likely(skb && (frame_hdr->sts & RXFSHR_RXFV) &&
                        (frame_hdr->len < RX_BUF_SIZE) && frame_hdr->len)) {
                        skb_reserve(skb, 2);
@@@ -837,7 -837,7 +837,7 @@@ static void ks_update_link_status(struc
  
  /**
   * ks_irq - device interrupt handler
 - * @irq: Interrupt number passed from the IRQ hnalder.
 + * @irq: Interrupt number passed from the IRQ handler.
   * @pw: The private word passed to register_irq(), our struct ks_net.
   *
   * This is the handler invoked to find out what happened
@@@ -1239,7 -1239,6 +1239,7 @@@ static int ks_set_mac_address(struct ne
        struct sockaddr *addr = paddr;
        u8 *da;
  
 +      netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
  
        da = (u8 *)netdev->dev_addr;
@@@ -1500,8 -1499,10 +1500,8 @@@ static int ks_hw_init(struct ks_net *ks
        ks->mcast_lst_size = 0;
  
        ks->frame_head_info = kmalloc(MHEADER_SIZE, GFP_KERNEL);
 -      if (!ks->frame_head_info) {
 -              pr_err("Error: Fail to allocate frame memory\n");
 +      if (!ks->frame_head_info)
                return false;
 -      }
  
        ks_set_mac(ks, KS_DEFAULT_MAC_ADDRESS);
        return true;
@@@ -1544,7 -1545,7 +1544,7 @@@ static int __devinit ks8851_probe(struc
  
        netdev->irq = platform_get_irq(pdev, 0);
  
-       if (netdev->irq < 0) {
+       if ((int)netdev->irq < 0) {
                err = netdev->irq;
                goto err_get_irq;
        }
diff --combined drivers/net/ethernet/sfc/rx.c
index 506d246699569138cd6a7bc38692e3ecdfbb9419,fc52fca7419338676a8810a6dd97d8b0c137a39a..1ba290d0c21cd1606e21a2965a86071ca0b595c6
@@@ -98,8 -98,8 +98,8 @@@ static inline unsigned int efx_rx_buf_o
        /* Offset is always within one page, so we don't need to consider
         * the page order.
         */
 -      return (((__force unsigned long) buf->dma_addr & (PAGE_SIZE - 1)) +
 -              efx->type->rx_buffer_hash_size);
 +      return ((unsigned int) buf->dma_addr & (PAGE_SIZE - 1)) +
 +              efx->type->rx_buffer_hash_size;
  }
  static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
  {
  
  static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
  {
 -      if (buf->is_page)
 +      if (buf->flags & EFX_RX_BUF_PAGE)
                return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
        else
 -              return ((u8 *)buf->u.skb->data +
 -                      efx->type->rx_buffer_hash_size);
 +              return (u8 *)buf->u.skb->data + efx->type->rx_buffer_hash_size;
  }
  
  static inline u32 efx_rx_buf_hash(const u8 *eh)
        return __le32_to_cpup((const __le32 *)(eh - 4));
  #else
        const u8 *data = eh - 4;
 -      return ((u32)data[0]       |
 -              (u32)data[1] << 8  |
 -              (u32)data[2] << 16 |
 -              (u32)data[3] << 24);
 +      return (u32)data[0]       |
 +             (u32)data[1] << 8  |
 +             (u32)data[2] << 16 |
 +             (u32)data[3] << 24;
  #endif
  }
  
@@@ -155,11 -156,10 +155,10 @@@ static int efx_init_rx_buffers_skb(stru
                if (unlikely(!skb))
                        return -ENOMEM;
  
-               /* Adjust the SKB for padding and checksum */
+               /* Adjust the SKB for padding */
                skb_reserve(skb, NET_IP_ALIGN);
                rx_buf->len = skb_len - NET_IP_ALIGN;
 -              rx_buf->is_page = false;
 +              rx_buf->flags = 0;
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
  
                rx_buf->dma_addr = pci_map_single(efx->pci_dev,
                                                  skb->data, rx_buf->len,
@@@ -227,7 -227,7 +226,7 @@@ static int efx_init_rx_buffers_page(str
                rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
                rx_buf->u.page = page;
                rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
 -              rx_buf->is_page = true;
 +              rx_buf->flags = EFX_RX_BUF_PAGE;
                ++rx_queue->added_count;
                ++rx_queue->alloc_page_count;
                ++state->refcnt;
  static void efx_unmap_rx_buffer(struct efx_nic *efx,
                                struct efx_rx_buffer *rx_buf)
  {
 -      if (rx_buf->is_page && rx_buf->u.page) {
 +      if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
                struct efx_rx_page_state *state;
  
                state = page_address(rx_buf->u.page);
                                       efx_rx_buf_size(efx),
                                       PCI_DMA_FROMDEVICE);
                }
 -      } else if (!rx_buf->is_page && rx_buf->u.skb) {
 +      } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
                pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
                                 rx_buf->len, PCI_DMA_FROMDEVICE);
        }
  static void efx_free_rx_buffer(struct efx_nic *efx,
                               struct efx_rx_buffer *rx_buf)
  {
 -      if (rx_buf->is_page && rx_buf->u.page) {
 +      if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
                __free_pages(rx_buf->u.page, efx->rx_buffer_order);
                rx_buf->u.page = NULL;
 -      } else if (!rx_buf->is_page && rx_buf->u.skb) {
 +      } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
                dev_kfree_skb_any(rx_buf->u.skb);
                rx_buf->u.skb = NULL;
        }
@@@ -310,7 -310,7 +309,7 @@@ static void efx_resurrect_rx_buffer(str
        new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
        new_buf->u.page = rx_buf->u.page;
        new_buf->len = rx_buf->len;
 -      new_buf->is_page = true;
 +      new_buf->flags = EFX_RX_BUF_PAGE;
        ++rx_queue->added_count;
  }
  
@@@ -324,10 -324,7 +323,10 @@@ static void efx_recycle_rx_buffer(struc
        struct efx_rx_buffer *new_buf;
        unsigned index;
  
 -      if (rx_buf->is_page && efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
 +      rx_buf->flags &= EFX_RX_BUF_PAGE;
 +
 +      if ((rx_buf->flags & EFX_RX_BUF_PAGE) &&
 +          efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
            page_count(rx_buf->u.page) == 1)
                efx_resurrect_rx_buffer(rx_queue, rx_buf);
  
@@@ -405,15 -402,17 +404,15 @@@ void efx_fast_push_rx_descriptors(struc
  void efx_rx_slow_fill(unsigned long context)
  {
        struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;
 -      struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
  
        /* Post an event to cause NAPI to run and refill the queue */
 -      efx_nic_generate_fill_event(channel);
 +      efx_nic_generate_fill_event(rx_queue);
        ++rx_queue->slow_fill_count;
  }
  
  static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
                                     struct efx_rx_buffer *rx_buf,
 -                                   int len, bool *discard,
 -                                   bool *leak_packet)
 +                                   int len, bool *leak_packet)
  {
        struct efx_nic *efx = rx_queue->efx;
        unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
        /* The packet must be discarded, but this is only a fatal error
         * if the caller indicated it was
         */
 -      *discard = true;
 +      rx_buf->flags |= EFX_RX_PKT_DISCARD;
  
        if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
                if (net_ratelimit())
                 * data at the end of the skb will be trashed. So
                 * we have no choice but to leak the fragment.
                 */
 -              *leak_packet = !rx_buf->is_page;
 +              *leak_packet = !(rx_buf->flags & EFX_RX_BUF_PAGE);
                efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
        } else {
                if (net_ratelimit())
   */
  static void efx_rx_packet_gro(struct efx_channel *channel,
                              struct efx_rx_buffer *rx_buf,
 -                            const u8 *eh, bool checksummed)
 +                            const u8 *eh)
  {
        struct napi_struct *napi = &channel->napi_str;
        gro_result_t gro_result;
  
        /* Pass the skb/page into the GRO engine */
 -      if (rx_buf->is_page) {
 +      if (rx_buf->flags & EFX_RX_BUF_PAGE) {
                struct efx_nic *efx = channel->efx;
                struct page *page = rx_buf->u.page;
                struct sk_buff *skb;
                skb->len = rx_buf->len;
                skb->data_len = rx_buf->len;
                skb->truesize += rx_buf->len;
 -              skb->ip_summed =
 -                      checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
 +              skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
 +                                CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
  
                skb_record_rx_queue(skb, channel->channel);
  
        } else {
                struct sk_buff *skb = rx_buf->u.skb;
  
 -              EFX_BUG_ON_PARANOID(!checksummed);
 +              EFX_BUG_ON_PARANOID(!(rx_buf->flags & EFX_RX_PKT_CSUMMED));
                rx_buf->u.skb = NULL;
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
  
                gro_result = napi_gro_receive(napi, skb);
        }
  }
  
  void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 -                 unsigned int len, bool checksummed, bool discard)
 +                 unsigned int len, u16 flags)
  {
        struct efx_nic *efx = rx_queue->efx;
        struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
        bool leak_packet = false;
  
        rx_buf = efx_rx_buffer(rx_queue, index);
 +      rx_buf->flags |= flags;
  
        /* This allows the refill path to post another buffer.
         * EFX_RXD_HEAD_ROOM ensures that the slot we are using
        rx_queue->removed_count++;
  
        /* Validate the length encoded in the event vs the descriptor pushed */
 -      efx_rx_packet__check_len(rx_queue, rx_buf, len,
 -                               &discard, &leak_packet);
 +      efx_rx_packet__check_len(rx_queue, rx_buf, len, &leak_packet);
  
        netif_vdbg(efx, rx_status, efx->net_dev,
                   "RX queue %d received id %x at %llx+%x %s%s\n",
                   efx_rx_queue_index(rx_queue), index,
                   (unsigned long long)rx_buf->dma_addr, len,
 -                 (checksummed ? " [SUMMED]" : ""),
 -                 (discard ? " [DISCARD]" : ""));
 +                 (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
 +                 (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");
  
        /* Discard packet, if instructed to do so */
 -      if (unlikely(discard)) {
 +      if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
                if (unlikely(leak_packet))
                        channel->n_skbuff_leaks++;
                else
        rx_buf->len = len - efx->type->rx_buffer_hash_size;
  out:
        if (channel->rx_pkt)
 -              __efx_rx_packet(channel,
 -                              channel->rx_pkt, channel->rx_pkt_csummed);
 +              __efx_rx_packet(channel, channel->rx_pkt);
        channel->rx_pkt = rx_buf;
 -      channel->rx_pkt_csummed = checksummed;
 +}
 +
 +static void efx_rx_deliver(struct efx_channel *channel,
 +                         struct efx_rx_buffer *rx_buf)
 +{
 +      struct sk_buff *skb;
 +
 +      /* We now own the SKB */
 +      skb = rx_buf->u.skb;
 +      rx_buf->u.skb = NULL;
 +
 +      /* Set the SKB flags */
 +      skb_checksum_none_assert(skb);
 +
 +      /* Pass the packet up */
 +      netif_receive_skb(skb);
 +
 +      /* Update allocation strategy method */
 +      channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
  }
  
  /* Handle a received packet.  Second half: Touches packet payload. */
 -void __efx_rx_packet(struct efx_channel *channel,
 -                   struct efx_rx_buffer *rx_buf, bool checksummed)
 +void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf)
  {
        struct efx_nic *efx = channel->efx;
 -      struct sk_buff *skb;
        u8 *eh = efx_rx_buf_eh(efx, rx_buf);
  
        /* If we're in loopback test, then pass the packet directly to the
                return;
        }
  
 -      if (!rx_buf->is_page) {
 -              skb = rx_buf->u.skb;
 +      if (!(rx_buf->flags & EFX_RX_BUF_PAGE)) {
 +              struct sk_buff *skb = rx_buf->u.skb;
  
                prefetch(skb_shinfo(skb));
  
        }
  
        if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
 -              checksummed = false;
 -
 -      if (likely(checksummed || rx_buf->is_page)) {
 -              efx_rx_packet_gro(channel, rx_buf, eh, checksummed);
 -              return;
 -      }
 +              rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
  
 -      /* We now own the SKB */
 -      skb = rx_buf->u.skb;
 -      rx_buf->u.skb = NULL;
 -
 -      /* Set the SKB flags */
 -      skb_checksum_none_assert(skb);
 -
 -      /* Pass the packet up */
 -      netif_receive_skb(skb);
 -
 -      /* Update allocation strategy method */
 -      channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
 +      if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)))
 +              efx_rx_packet_gro(channel, rx_buf, eh);
 +      else
 +              efx_rx_deliver(channel, rx_buf);
  }
  
  void efx_rx_strategy(struct efx_channel *channel)
@@@ -705,7 -703,6 +705,7 @@@ void efx_init_rx_queue(struct efx_rx_qu
        rx_queue->fast_fill_limit = limit;
  
        /* Set up RX descriptor ring */
 +      rx_queue->enabled = true;
        efx_nic_init_rx(rx_queue);
  }
  
@@@ -717,9 -714,6 +717,9 @@@ void efx_fini_rx_queue(struct efx_rx_qu
        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
  
 +      /* A flush failure might have left rx_queue->enabled */
 +      rx_queue->enabled = false;
 +
        del_timer_sync(&rx_queue->slow_fill);
        efx_nic_fini_rx(rx_queue);
  
diff --combined drivers/net/ethernet/ti/davinci_emac.c
index 6d74cb7aaae8ab9426d63e50f05a190a6d14c816,4b2f54565f64adece3f69b5a89b7997666ce16e5..174a3348f6762d67a661801342ea6cbe95a7810f
@@@ -992,9 -992,10 +992,9 @@@ static irqreturn_t emac_irq(int irq, vo
  
  static struct sk_buff *emac_rx_alloc(struct emac_priv *priv)
  {
 -      struct sk_buff *skb = dev_alloc_skb(priv->rx_buf_size);
 +      struct sk_buff *skb = netdev_alloc_skb(priv->ndev, priv->rx_buf_size);
        if (WARN_ON(!skb))
                return NULL;
 -      skb->dev = priv->ndev;
        skb_reserve(skb, NET_IP_ALIGN);
        return skb;
  }
@@@ -1008,7 -1009,7 +1008,7 @@@ static void emac_rx_handler(void *token
        int                     ret;
  
        /* free and bail if we are shutting down */
-       if (unlikely(!netif_running(ndev) || !netif_carrier_ok(ndev))) {
+       if (unlikely(!netif_running(ndev))) {
                dev_kfree_skb_any(skb);
                return;
        }
  recycle:
        ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
                        skb_tailroom(skb), GFP_KERNEL);
-       if (WARN_ON(ret < 0))
+       WARN_ON(ret == -ENOMEM);
+       if (unlikely(ret < 0))
                dev_kfree_skb_any(skb);
  }
  
@@@ -1254,15 -1257,15 +1256,15 @@@ static int emac_dev_setmac_addr(struct 
        struct sockaddr *sa = addr;
  
        if (!is_valid_ether_addr(sa->sa_data))
 -              return -EINVAL;
 +              return -EADDRNOTAVAIL;
  
        /* Store mac addr in priv and rx channel and set it in EMAC hw */
        memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
        memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len);
 +      ndev->addr_assign_type &= ~NET_ADDR_RANDOM;
  
        /* MAC address is configured only after the interface is enabled. */
        if (netif_running(ndev)) {
 -              memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
                emac_setmac(priv, EMAC_DEF_RX_CH, priv->mac_addr);
        }
  
@@@ -1789,6 -1792,7 +1791,6 @@@ static int __devinit davinci_emac_probe
  
        ndev = alloc_etherdev(sizeof(struct emac_priv));
        if (!ndev) {
 -              dev_err(&pdev->dev, "error allocating net_device\n");
                rc = -ENOMEM;
                goto free_clk;
        }
  
        if (!is_valid_ether_addr(priv->mac_addr)) {
                /* Use random MAC if none passed */
 -              random_ether_addr(priv->mac_addr);
 +              eth_hw_addr_random(ndev);
 +              memcpy(priv->mac_addr, ndev->dev_addr, ndev->addr_len);
                dev_warn(&pdev->dev, "using random MAC addr: %pM\n",
                                                        priv->mac_addr);
        }
diff --combined drivers/net/ppp/ppp_generic.c
index 93a86397af36a970ff2dcb18708b70eea4e966a6,486b4048850df8afca33d4183b436965b6a23137..6d4d2ebb0a8a7252cb664d868fc06e37de1b5fd5
@@@ -2024,14 -2024,22 +2024,22 @@@ ppp_mp_reconstruct(struct ppp *ppp
                        continue;
                }
                if (PPP_MP_CB(p)->sequence != seq) {
+                       u32 oldseq;
                        /* Fragment `seq' is missing.  If it is after
                           minseq, it might arrive later, so stop here. */
                        if (seq_after(seq, minseq))
                                break;
                        /* Fragment `seq' is lost, keep going. */
                        lost = 1;
+                       oldseq = seq;
                        seq = seq_before(minseq, PPP_MP_CB(p)->sequence)?
                                minseq + 1: PPP_MP_CB(p)->sequence;
+                       if (ppp->debug & 1)
+                               netdev_printk(KERN_DEBUG, ppp->dev,
+                                             "lost frag %u..%u\n",
+                                             oldseq, seq-1);
                        goto again;
                }
  
                        struct sk_buff *tmp2;
  
                        skb_queue_reverse_walk_from_safe(list, p, tmp2) {
+                               if (ppp->debug & 1)
+                                       netdev_printk(KERN_DEBUG, ppp->dev,
+                                                     "discarding frag %u\n",
+                                                     PPP_MP_CB(p)->sequence);
                                __skb_unlink(p, list);
                                kfree_skb(p);
                        }
                /* If we have discarded any fragments,
                   signal a receive error. */
                if (PPP_MP_CB(head)->sequence != ppp->nextseq) {
+                       skb_queue_walk_safe(list, p, tmp) {
+                               if (p == head)
+                                       break;
+                               if (ppp->debug & 1)
+                                       netdev_printk(KERN_DEBUG, ppp->dev,
+                                                     "discarding frag %u\n",
+                                                     PPP_MP_CB(p)->sequence);
+                               __skb_unlink(p, list);
+                               kfree_skb(p);
+                       }
                        if (ppp->debug & 1)
                                netdev_printk(KERN_DEBUG, ppp->dev,
                                              "  missed pkts %u..%u\n",
  
                                skb->len += p->len;
                                skb->data_len += p->len;
 -                              skb->truesize += p->len;
 +                              skb->truesize += p->truesize;
  
                                if (p == tail)
                                        break;
diff --combined drivers/net/vmxnet3/vmxnet3_drv.c
index e1562e8acba5b0d68ed220422c3394e4e048c5e8,3dcd3857a36cb8346c82c8131c1131da40a53063..adf527e27583fa98e32636ee1e9401d86646b8c5
@@@ -537,8 -537,11 +537,8 @@@ vmxnet3_tq_create(struct vmxnet3_tx_que
  
        tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]),
                               GFP_KERNEL);
 -      if (!tq->buf_info) {
 -              printk(KERN_ERR "%s: failed to allocate tx bufinfo\n",
 -                     adapter->netdev->name);
 +      if (!tq->buf_info)
                goto err;
 -      }
  
        return 0;
  
@@@ -633,7 -636,7 +633,7 @@@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_
  
        dev_dbg(&adapter->netdev->dev,
                "alloc_rx_buf: %d allocated, next2fill %u, next2comp "
 -              "%u, uncommited %u\n", num_allocated, ring->next2fill,
 +              "%u, uncommitted %u\n", num_allocated, ring->next2fill,
                ring->next2comp, rq->uncommitted[ring_idx]);
  
        /* so that the device can distinguish a full ring and an empty ring */
@@@ -813,32 -816,35 +813,32 @@@ vmxnet3_parse_and_copy_hdr(struct sk_bu
  
        if (ctx->mss) { /* TSO */
                ctx->eth_ip_hdr_size = skb_transport_offset(skb);
 -              ctx->l4_hdr_size = ((struct tcphdr *)
 -                                 skb_transport_header(skb))->doff * 4;
 +              ctx->l4_hdr_size = tcp_hdrlen(skb);
                ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
        } else {
                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
  
                        if (ctx->ipv4) {
 -                              struct iphdr *iph = (struct iphdr *)
 -                                                  skb_network_header(skb);
 +                              const struct iphdr *iph = ip_hdr(skb);
 +
                                if (iph->protocol == IPPROTO_TCP)
 -                                      ctx->l4_hdr_size = ((struct tcphdr *)
 -                                         skb_transport_header(skb))->doff * 4;
 +                                      ctx->l4_hdr_size = tcp_hdrlen(skb);
                                else if (iph->protocol == IPPROTO_UDP)
                                        /*
                                         * Use tcp header size so that bytes to
                                         * be copied are more than required by
                                         * the device.
                                         */
 -                                      ctx->l4_hdr_size =
 -                                                      sizeof(struct tcphdr);
 +                                      ctx->l4_hdr_size = sizeof(struct tcphdr);
                                else
                                        ctx->l4_hdr_size = 0;
                        } else {
                                /* for simplicity, don't copy L4 headers */
                                ctx->l4_hdr_size = 0;
                        }
-                       ctx->copy_size = ctx->eth_ip_hdr_size +
-                                        ctx->l4_hdr_size;
+                       ctx->copy_size = min(ctx->eth_ip_hdr_size +
+                                        ctx->l4_hdr_size, skb->len);
                } else {
                        ctx->eth_ip_hdr_size = 0;
                        ctx->l4_hdr_size = 0;
@@@ -875,17 -881,14 +875,17 @@@ static voi
  vmxnet3_prepare_tso(struct sk_buff *skb,
                    struct vmxnet3_tx_ctx *ctx)
  {
 -      struct tcphdr *tcph = (struct tcphdr *)skb_transport_header(skb);
 +      struct tcphdr *tcph = tcp_hdr(skb);
 +
        if (ctx->ipv4) {
 -              struct iphdr *iph = (struct iphdr *)skb_network_header(skb);
 +              struct iphdr *iph = ip_hdr(skb);
 +
                iph->check = 0;
                tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
                                                 IPPROTO_TCP, 0);
        } else {
 -              struct ipv6hdr *iph = (struct ipv6hdr *)skb_network_header(skb);
 +              struct ipv6hdr *iph = ipv6_hdr(skb);
 +
                tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
                                               IPPROTO_TCP, 0);
        }
@@@ -1516,9 -1519,11 +1516,9 @@@ vmxnet3_rq_create(struct vmxnet3_rx_que
        sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
                                                   rq->rx_ring[1].size);
        bi = kzalloc(sz, GFP_KERNEL);
 -      if (!bi) {
 -              printk(KERN_ERR "%s: failed to allocate rx bufinfo\n",
 -                     adapter->netdev->name);
 +      if (!bi)
                goto err;
 -      }
 +
        rq->buf_info[0] = bi;
        rq->buf_info[1] = bi + rq->rx_ring[0].size;
  
@@@ -2918,8 -2923,11 +2918,8 @@@ vmxnet3_probe_device(struct pci_dev *pd
        printk(KERN_INFO "# of Tx queues : %d, # of Rx queues : %d\n",
               num_tx_queues, num_rx_queues);
  
 -      if (!netdev) {
 -              printk(KERN_ERR "Failed to alloc ethernet device for adapter "
 -                      "%s\n", pci_name(pdev));
 +      if (!netdev)
                return -ENOMEM;
 -      }
  
        pci_set_drvdata(pdev, netdev);
        adapter = netdev_priv(netdev);
  
        adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
        if (adapter->pm_conf == NULL) {
 -              printk(KERN_ERR "Failed to allocate memory for %s\n",
 -                      pci_name(pdev));
                err = -ENOMEM;
                goto err_alloc_pm;
        }
  
        adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL);
        if (adapter->rss_conf == NULL) {
 -              printk(KERN_ERR "Failed to allocate memory for %s\n",
 -                     pci_name(pdev));
                err = -ENOMEM;
                goto err_alloc_rss;
        }
diff --combined include/linux/skbuff.h
index 06a4c0fd7bef79dcb1c61411d36713dfb87847a3,ae86adee3746aec6661ef35e2b18cf62e0d17780..79ef8209bbb706b51a2276df717d97d83dec3234
@@@ -361,7 -361,6 +361,7 @@@ typedef unsigned char *sk_buff_data_t
   *            ports.
   *    @wifi_acked_valid: wifi_acked was set
   *    @wifi_acked: whether frame was acked on wifi or not
 + *    @no_fcs:  Request NIC to treat last 4 bytes as Ethernet FCS
   *    @dma_cookie: a cookie to one of several possible DMA operations
   *            done by skb DMA functions
   *    @secmark: security marking
@@@ -439,11 -438,6 +439,11 @@@ struct sk_buff 
  #endif
  
        int                     skb_iif;
 +
 +      __u32                   rxhash;
 +
 +      __u16                   vlan_tci;
 +
  #ifdef CONFIG_NET_SCHED
        __u16                   tc_index;       /* traffic control index */
  #ifdef CONFIG_NET_CLS_ACT
  #endif
  #endif
  
 -      __u32                   rxhash;
 -
        __u16                   queue_mapping;
        kmemcheck_bitfield_begin(flags2);
  #ifdef CONFIG_IPV6_NDISC_NODETYPE
        __u8                    l4_rxhash:1;
        __u8                    wifi_acked_valid:1;
        __u8                    wifi_acked:1;
 -      /* 10/12 bit hole (depending on ndisc_nodetype presence) */
 +      __u8                    no_fcs:1;
 +      /* 9/11 bit hole (depending on ndisc_nodetype presence) */
        kmemcheck_bitfield_end(flags2);
  
  #ifdef CONFIG_NET_DMA
                __u32           dropcount;
        };
  
 -      __u16                   vlan_tci;
 -
        sk_buff_data_t          transport_header;
        sk_buff_data_t          network_header;
        sk_buff_data_t          mac_header;
@@@ -878,24 -875,6 +878,24 @@@ static inline struct sk_buff *skb_peek(
        return list;
  }
  
 +/**
 + *    skb_peek_next - peek skb following the given one from a queue
 + *    @skb: skb to start from
 + *    @list_: list to peek at
 + *
 + *    Returns %NULL when the end of the list is met or a pointer to the
 + *    next element. The reference count is not incremented and the
 + *    reference is therefore volatile. Use with caution.
 + */
 +static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
 +              const struct sk_buff_head *list_)
 +{
 +      struct sk_buff *next = skb->next;
 +      if (next == (struct sk_buff *)list_)
 +              next = NULL;
 +      return next;
 +}
 +
  /**
   *    skb_peek_tail - peek at the tail of an &sk_buff_head
   *    @list_: list to peek at
@@@ -1486,6 -1465,16 +1486,16 @@@ static inline void skb_set_mac_header(s
  }
  #endif /* NET_SKBUFF_DATA_USES_OFFSET */
  
+ static inline void skb_mac_header_rebuild(struct sk_buff *skb)
+ {
+       if (skb_mac_header_was_set(skb)) {
+               const unsigned char *old_mac = skb_mac_header(skb);
+               skb_set_mac_header(skb, -skb->mac_len);
+               memmove(skb_mac_header(skb), old_mac, skb->mac_len);
+       }
+ }
  static inline int skb_checksum_start_offset(const struct sk_buff *skb)
  {
        return skb->csum_start - skb_headroom(skb);
@@@ -2066,7 -2055,7 +2076,7 @@@ static inline void skb_frag_add_head(st
        for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
  
  extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
 -                                         int *peeked, int *err);
 +                                         int *peeked, int *off, int *err);
  extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
                                         int noblock, int *err);
  extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
diff --combined net/atm/clip.c
index ef95a30306fa3e7845e4778c9757dca10efd5ce2,127fe70a1baa3d8ecf937b76367767b9bf46f9e0..5de42ea309bc9e1d9c4dbdb583b04fa5814651ba
@@@ -46,8 -46,8 +46,8 @@@
  
  static struct net_device *clip_devs;
  static struct atm_vcc *atmarpd;
- static struct neigh_table clip_tbl;
  static struct timer_list idle_timer;
+ static const struct neigh_ops clip_neigh_ops;
  
  static int to_atmarpd(enum atmarp_ctrl_type type, int itf, __be32 ip)
  {
@@@ -123,6 -123,8 +123,8 @@@ static int neigh_check_cb(struct neighb
        struct atmarp_entry *entry = neighbour_priv(n);
        struct clip_vcc *cv;
  
+       if (n->ops != &clip_neigh_ops)
+               return 0;
        for (cv = entry->vccs; cv; cv = cv->next) {
                unsigned long exp = cv->last_use + cv->idle_timeout;
  
  
  static void idle_timer_check(unsigned long dummy)
  {
-       write_lock(&clip_tbl.lock);
-       __neigh_for_each_release(&clip_tbl, neigh_check_cb);
+       write_lock(&arp_tbl.lock);
+       __neigh_for_each_release(&arp_tbl, neigh_check_cb);
        mod_timer(&idle_timer, jiffies + CLIP_CHECK_INTERVAL * HZ);
-       write_unlock(&clip_tbl.lock);
+       write_unlock(&arp_tbl.lock);
  }
  
  static int clip_arp_rcv(struct sk_buff *skb)
@@@ -328,8 -330,6 +330,8 @@@ static netdev_tx_t clip_start_xmit(stru
        struct atmarp_entry *entry;
        struct neighbour *n;
        struct atm_vcc *vcc;
 +      struct rtable *rt;
 +      __be32 *daddr;
        int old;
        unsigned long flags;
  
                dev->stats.tx_dropped++;
                return NETDEV_TX_OK;
        }
 -      n = dst_get_neighbour_noref(dst);
 +      rt = (struct rtable *) dst;
 +      if (rt->rt_gateway)
 +              daddr = &rt->rt_gateway;
 +      else
 +              daddr = &ip_hdr(skb)->daddr;
 +      n = dst_neigh_lookup(dst, daddr);
        if (!n) {
                pr_err("NO NEIGHBOUR !\n");
                dev_kfree_skb(skb);
                        dev_kfree_skb(skb);
                        dev->stats.tx_dropped++;
                }
 -              return NETDEV_TX_OK;
 +              goto out_release_neigh;
        }
        pr_debug("neigh %p, vccs %p\n", entry, entry->vccs);
        ATM_SKB(skb)->vcc = vcc = entry->vccs->vcc;
        old = xchg(&entry->vccs->xoff, 1);      /* assume XOFF ... */
        if (old) {
                pr_warning("XOFF->XOFF transition\n");
 -              return NETDEV_TX_OK;
 +              goto out_release_neigh;
        }
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += skb->len;
        vcc->send(vcc, skb);
        if (atm_may_send(vcc, 0)) {
                entry->vccs->xoff = 0;
 -              return NETDEV_TX_OK;
 +              goto out_release_neigh;
        }
        spin_lock_irqsave(&clip_priv->xoff_lock, flags);
        netif_stop_queue(dev);  /* XOFF -> throttle immediately */
           of the brief netif_stop_queue. If this isn't true or if it
           changes, use netif_wake_queue instead. */
        spin_unlock_irqrestore(&clip_priv->xoff_lock, flags);
 +out_release_neigh:
 +      neigh_release(n);
        return NETDEV_TX_OK;
  }
  
diff --combined net/core/neighbour.c
index f98ec444133aaf7a466d32ef2a176dbdf82e6438,2a83914b027743fbf047aae18315ab0197bc6e0c..0a68045782d18a635d8071a8ce673f4d1dd95379
@@@ -826,6 -826,8 +826,8 @@@ next_elt
                write_unlock_bh(&tbl->lock);
                cond_resched();
                write_lock_bh(&tbl->lock);
+               nht = rcu_dereference_protected(tbl->nht,
+                                               lockdep_is_held(&tbl->lock));
        }
        /* Cycle through all hash buckets every base_reachable_time/2 ticks.
         * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
@@@ -2165,35 -2167,6 +2167,35 @@@ nla_put_failure
        return -EMSGSIZE;
  }
  
 +static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
 +                          u32 pid, u32 seq, int type, unsigned int flags,
 +                          struct neigh_table *tbl)
 +{
 +      struct nlmsghdr *nlh;
 +      struct ndmsg *ndm;
 +
 +      nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
 +      if (nlh == NULL)
 +              return -EMSGSIZE;
 +
 +      ndm = nlmsg_data(nlh);
 +      ndm->ndm_family  = tbl->family;
 +      ndm->ndm_pad1    = 0;
 +      ndm->ndm_pad2    = 0;
 +      ndm->ndm_flags   = pn->flags | NTF_PROXY;
 +      ndm->ndm_type    = NDA_DST;
 +      ndm->ndm_ifindex = pn->dev->ifindex;
 +      ndm->ndm_state   = NUD_NONE;
 +
 +      NLA_PUT(skb, NDA_DST, tbl->key_len, pn->key);
 +
 +      return nlmsg_end(skb, nlh);
 +
 +nla_put_failure:
 +      nlmsg_cancel(skb, nlh);
 +      return -EMSGSIZE;
 +}
 +
  static void neigh_update_notify(struct neighbour *neigh)
  {
        call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
        return rc;
  }
  
 +static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
 +                           struct netlink_callback *cb)
 +{
 +      struct pneigh_entry *n;
 +      struct net *net = sock_net(skb->sk);
 +      int rc, h, s_h = cb->args[3];
 +      int idx, s_idx = idx = cb->args[4];
 +
 +      read_lock_bh(&tbl->lock);
 +
 +      for (h = 0; h <= PNEIGH_HASHMASK; h++) {
 +              if (h < s_h)
 +                      continue;
 +              if (h > s_h)
 +                      s_idx = 0;
 +              for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
 +                      if (dev_net(n->dev) != net)
 +                              continue;
 +                      if (idx < s_idx)
 +                              goto next;
 +                      if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
 +                                          cb->nlh->nlmsg_seq,
 +                                          RTM_NEWNEIGH,
 +                                          NLM_F_MULTI, tbl) <= 0) {
 +                              read_unlock_bh(&tbl->lock);
 +                              rc = -1;
 +                              goto out;
 +                      }
 +              next:
 +                      idx++;
 +              }
 +      }
 +
 +      read_unlock_bh(&tbl->lock);
 +      rc = skb->len;
 +out:
 +      cb->args[3] = h;
 +      cb->args[4] = idx;
 +      return rc;
 +
 +}
 +
  static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
  {
        struct neigh_table *tbl;
        int t, family, s_t;
 +      int proxy = 0;
 +      int err = 0;
  
        read_lock(&neigh_tbl_lock);
        family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
 +
 +      /* check for full ndmsg structure presence, family member is
 +       * the same for both structures
 +       */
 +      if (nlmsg_len(cb->nlh) >= sizeof(struct ndmsg) &&
 +          ((struct ndmsg *) nlmsg_data(cb->nlh))->ndm_flags == NTF_PROXY)
 +              proxy = 1;
 +
        s_t = cb->args[0];
  
 -      for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
 +      for (tbl = neigh_tables, t = 0; tbl && (err >= 0);
 +           tbl = tbl->next, t++) {
                if (t < s_t || (family && tbl->family != family))
                        continue;
                if (t > s_t)
                        memset(&cb->args[1], 0, sizeof(cb->args) -
                                                sizeof(cb->args[0]));
 -              if (neigh_dump_table(tbl, skb, cb) < 0)
 -                      break;
 +              if (proxy)
 +                      err = pneigh_dump_table(tbl, skb, cb);
 +              else
 +                      err = neigh_dump_table(tbl, skb, cb);
        }
        read_unlock(&neigh_tbl_lock);
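
The new pneigh_dump_table() path is selected only when the RTM_GETNEIGH dump request carries a full struct ndmsg whose ndm_flags equals NTF_PROXY, as checked in neigh_dump_info() above. A hypothetical userspace request that would exercise it (sketch only; the field choices are assumptions):

#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/neighbour.h>

static struct {
	struct nlmsghdr nlh;
	struct ndmsg	ndm;
} req;

static void build_proxy_neigh_dump(void)
{
	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len   = NLMSG_LENGTH(sizeof(struct ndmsg));
	req.nlh.nlmsg_type  = RTM_GETNEIGH;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.ndm.ndm_family  = AF_INET;		/* e.g. arp_tbl proxies */
	req.ndm.ndm_flags   = NTF_PROXY;	/* full ndmsg + NTF_PROXY */
}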
  
diff --combined net/core/rtnetlink.c
index 7aef62e53113475f343a053a3cae866c8b4a3394,606a6e8f3671defcc1eab21a471f0468f54f3359..5cf39cd7da85e85d7dc5e913a80735eb9acb2838
@@@ -60,7 -60,6 +60,6 @@@ struct rtnl_link 
  };
  
  static DEFINE_MUTEX(rtnl_mutex);
- static u16 min_ifinfo_dump_size;
  
  void rtnl_lock(void)
  {
@@@ -724,10 -723,11 +723,11 @@@ static void copy_rtnl_link_stats64(voi
  }
  
  /* All VF info */
- static inline int rtnl_vfinfo_size(const struct net_device *dev)
+ static inline int rtnl_vfinfo_size(const struct net_device *dev,
+                                  u32 ext_filter_mask)
  {
-       if (dev->dev.parent && dev_is_pci(dev->dev.parent)) {
+       if (dev->dev.parent && dev_is_pci(dev->dev.parent) &&
+           (ext_filter_mask & RTEXT_FILTER_VF)) {
                int num_vfs = dev_num_vf(dev->dev.parent);
                size_t size = nla_total_size(sizeof(struct nlattr));
                size += nla_total_size(num_vfs * sizeof(struct nlattr));
@@@ -766,7 -766,8 +766,8 @@@ static size_t rtnl_port_size(const stru
                return port_self_size;
  }
  
- static noinline size_t if_nlmsg_size(const struct net_device *dev)
+ static noinline size_t if_nlmsg_size(const struct net_device *dev,
+                                    u32 ext_filter_mask)
  {
        return NLMSG_ALIGN(sizeof(struct ifinfomsg))
               + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
               + nla_total_size(4) /* IFLA_MASTER */
               + nla_total_size(1) /* IFLA_OPERSTATE */
               + nla_total_size(1) /* IFLA_LINKMODE */
-              + nla_total_size(4) /* IFLA_NUM_VF */
-              + rtnl_vfinfo_size(dev) /* IFLA_VFINFO_LIST */
+              + nla_total_size(ext_filter_mask
+                               & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
+              + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
               + rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
               + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
               + rtnl_link_get_af_size(dev); /* IFLA_AF_SPEC */
@@@ -868,7 -870,7 +870,7 @@@ static int rtnl_port_fill(struct sk_buf
  
  static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
                            int type, u32 pid, u32 seq, u32 change,
-                           unsigned int flags)
+                           unsigned int flags, u32 ext_filter_mask)
  {
        struct ifinfomsg *ifm;
        struct nlmsghdr *nlh;
                goto nla_put_failure;
        copy_rtnl_link_stats64(nla_data(attr), stats);
  
-       if (dev->dev.parent)
+       if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF))
                NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent));
  
-       if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) {
+       if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent
+           && (ext_filter_mask & RTEXT_FILTER_VF)) {
                int i;
  
                struct nlattr *vfinfo, *vf;
@@@ -1048,6 -1051,8 +1051,8 @@@ static int rtnl_dump_ifinfo(struct sk_b
        struct net_device *dev;
        struct hlist_head *head;
        struct hlist_node *node;
+       struct nlattr *tb[IFLA_MAX+1];
+       u32 ext_filter_mask = 0;
  
        s_h = cb->args[0];
        s_idx = cb->args[1];
        rcu_read_lock();
        cb->seq = net->dev_base_seq;
  
+       nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
+                   ifla_policy);
+       if (tb[IFLA_EXT_MASK])
+               ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
        for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
                idx = 0;
                head = &net->dev_index_head[h];
                        if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
                                             NETLINK_CB(cb->skb).pid,
                                             cb->nlh->nlmsg_seq, 0,
-                                            NLM_F_MULTI) <= 0)
+                                            NLM_F_MULTI,
+                                            ext_filter_mask) <= 0)
                                goto out;
  
                        nl_dump_check_consistent(cb, nlmsg_hdr(skb));
@@@ -1100,6 -1112,7 +1112,7 @@@ const struct nla_policy ifla_policy[IFL
        [IFLA_VF_PORTS]         = { .type = NLA_NESTED },
        [IFLA_PORT_SELF]        = { .type = NLA_NESTED },
        [IFLA_AF_SPEC]          = { .type = NLA_NESTED },
+       [IFLA_EXT_MASK]         = { .type = NLA_U32 },
  };
  EXPORT_SYMBOL(ifla_policy);
  
@@@ -1509,8 -1522,6 +1522,6 @@@ errout
  
        if (send_addr_notify)
                call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
-       min_ifinfo_dump_size = max_t(u16, if_nlmsg_size(dev),
-                                    min_ifinfo_dump_size);
  
        return err;
  }
@@@ -1842,6 -1853,7 +1853,7 @@@ static int rtnl_getlink(struct sk_buff 
        struct net_device *dev = NULL;
        struct sk_buff *nskb;
        int err;
+       u32 ext_filter_mask = 0;
  
        err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
        if (err < 0)
        if (tb[IFLA_IFNAME])
                nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
  
+       if (tb[IFLA_EXT_MASK])
+               ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
        ifm = nlmsg_data(nlh);
        if (ifm->ifi_index > 0)
                dev = __dev_get_by_index(net, ifm->ifi_index);
        if (dev == NULL)
                return -ENODEV;
  
-       nskb = nlmsg_new(if_nlmsg_size(dev), GFP_KERNEL);
+       nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
        if (nskb == NULL)
                return -ENOBUFS;
  
        err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).pid,
-                              nlh->nlmsg_seq, 0, 0);
+                              nlh->nlmsg_seq, 0, 0, ext_filter_mask);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in if_nlmsg_size */
                WARN_ON(err == -EMSGSIZE);
        return err;
  }
  
- static u16 rtnl_calcit(struct sk_buff *skb)
+ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
  {
+       struct net *net = sock_net(skb->sk);
+       struct net_device *dev;
+       struct nlattr *tb[IFLA_MAX+1];
+       u32 ext_filter_mask = 0;
+       u16 min_ifinfo_dump_size = 0;
+       nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX, ifla_policy);
+       if (tb[IFLA_EXT_MASK])
+               ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+       if (!ext_filter_mask)
+               return NLMSG_GOODSIZE;
+       /*
+        * traverse the list of net devices and compute the minimum
+        * buffer size based upon the filter mask.
+        */
+       list_for_each_entry(dev, &net->dev_base_head, dev_list) {
+               min_ifinfo_dump_size = max_t(u16, min_ifinfo_dump_size,
+                                            if_nlmsg_size(dev,
+                                                          ext_filter_mask));
+       }
        return min_ifinfo_dump_size;
  }
  
@@@ -1913,13 -1951,11 +1951,11 @@@ void rtmsg_ifinfo(int type, struct net_
        int err = -ENOBUFS;
        size_t if_info_size;
  
-       skb = nlmsg_new((if_info_size = if_nlmsg_size(dev)), GFP_KERNEL);
+       skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), GFP_KERNEL);
        if (skb == NULL)
                goto errout;
  
-       min_ifinfo_dump_size = max_t(u16, if_info_size, min_ifinfo_dump_size);
-       err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0);
+       err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0, 0);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in if_nlmsg_size() */
                WARN_ON(err == -EMSGSIZE);
@@@ -1977,17 -2013,12 +2013,17 @@@ static int rtnetlink_rcv_msg(struct sk_
                        return -EOPNOTSUPP;
                calcit = rtnl_get_calcit(family, type);
                if (calcit)
-                       min_dump_alloc = calcit(skb);
+                       min_dump_alloc = calcit(skb, nlh);
  
                __rtnl_unlock();
                rtnl = net->rtnl;
 -              err = netlink_dump_start(rtnl, skb, nlh, dumpit,
 -                                       NULL, min_dump_alloc);
 +              {
 +                      struct netlink_dump_control c = {
 +                              .dump           = dumpit,
 +                              .min_dump_alloc = min_dump_alloc,
 +                      };
 +                      err = netlink_dump_start(rtnl, skb, nlh, &c);
 +              }
                rtnl_lock();
                return err;
        }
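
Taken together, the rtnetlink.c hunks make VF information opt-in: IFLA_NUM_VF and IFLA_VFINFO_LIST are emitted, and rtnl_calcit() walks the device list to size the dump buffer, only when the request carries IFLA_EXT_MASK with RTEXT_FILTER_VF; without the attribute the buffer falls back to NLMSG_GOODSIZE. A hypothetical request laid out to match the rtgenmsg-based nlmsg_parse() above (sketch only):

#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_link.h>

static struct {
	struct nlmsghdr nlh;
	struct rtgenmsg g;
	/* attributes are parsed at NLMSG_ALIGN(sizeof(struct rtgenmsg)) */
	struct nlattr	ext_req __attribute__((aligned(NLMSG_ALIGNTO)));
	__u32		ext_mask;
} req;

static void build_vf_link_dump(void)
{
	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len    = sizeof(req);
	req.nlh.nlmsg_type   = RTM_GETLINK;
	req.nlh.nlmsg_flags  = NLM_F_REQUEST | NLM_F_DUMP;
	req.g.rtgen_family   = AF_UNSPEC;
	req.ext_req.nla_type = IFLA_EXT_MASK;
	req.ext_req.nla_len  = NLA_HDRLEN + sizeof(__u32);
	req.ext_mask	     = RTEXT_FILTER_VF;	/* opt in to VF info */
}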
diff --combined net/ipv4/ip_gre.c
index b59414a0c1eeffff454baa5cf12d4c5d1136221c,38673d2860e293404a9109c88ec9b03ab6b810f1..6ef66af12291001657f2fb914c78903587a04fec
@@@ -65,7 -65,7 +65,7 @@@
     it is infeasible task. The most general solutions would be
     to keep skb->encapsulation counter (sort of local ttl),
     and silently drop packet when it expires. It is a good
-    solution, but it supposes maintaing new variable in ALL
+    solution, but it supposes maintaining new variable in ALL
     skb, even if no tunneling is used.
  
     Current solution: xmit_recursion breaks dead loops. This is a percpu
  
     One of them is to parse packet trying to detect inner encapsulation
     made by our node. It is difficult or even impossible, especially,
-    taking into account fragmentation. TO be short, tt is not solution at all.
+    taking into account fragmentation. TO be short, ttl is not solution at all.
  
     Current solution: The solution was UNEXPECTEDLY SIMPLE.
     We force DF flag on tunnels with preconfigured hop limit,
     that is ALL. :-) Well, it does not remove the problem completely,
     but exponential growth of network traffic is changed to linear
     (branches, that exceed pmtu are pruned) and tunnel mtu
-    fastly degrades to value <68, where looping stops.
+    rapidly degrades to value <68, where looping stops.
     Yes, it is not good if there exists a router in the loop,
     which does not force DF, even when encapsulating packets have DF set.
     But it is not our problem! Nobody could accuse us, we made
@@@ -457,8 -457,8 +457,8 @@@ static void ipgre_err(struct sk_buff *s
     GRE tunnels with enabled checksum. Tell them "thank you".
  
     Well, I wonder, rfc1812 was written by Cisco employee,
-    what the hell these idiots break standrads established
-    by themself???
+    what the hell these idiots break standards established
+    by themselves???
   */
  
        const struct iphdr *iph = (const struct iphdr *)skb->data;
@@@ -730,16 -730,15 +730,16 @@@ static netdev_tx_t ipgre_tunnel_xmit(st
  
                if (skb->protocol == htons(ETH_P_IP)) {
                        rt = skb_rtable(skb);
 -                      if ((dst = rt->rt_gateway) == 0)
 -                              goto tx_error_icmp;
 +                      dst = rt->rt_gateway;
                }
  #if IS_ENABLED(CONFIG_IPV6)
                else if (skb->protocol == htons(ETH_P_IPV6)) {
 -                      struct neighbour *neigh = dst_get_neighbour_noref(skb_dst(skb));
                        const struct in6_addr *addr6;
 +                      struct neighbour *neigh;
 +                      bool do_tx_error_icmp;
                        int addr_type;
  
 +                      neigh = dst_neigh_lookup(skb_dst(skb), &ipv6_hdr(skb)->daddr);
                        if (neigh == NULL)
                                goto tx_error;
  
                        }
  
                        if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
 +                              do_tx_error_icmp = true;
 +                      else {
 +                              do_tx_error_icmp = false;
 +                              dst = addr6->s6_addr32[3];
 +                      }
 +                      neigh_release(neigh);
 +                      if (do_tx_error_icmp)
                                goto tx_error_icmp;
 -
 -                      dst = addr6->s6_addr32[3];
                }
  #endif
                else
        __IPTUNNEL_XMIT(tstats, &dev->stats);
        return NETDEV_TX_OK;
  
 +#if IS_ENABLED(CONFIG_IPV6)
  tx_error_icmp:
        dst_link_failure(skb);
 -
 +#endif
  tx_error:
        dev->stats.tx_errors++;
        dev_kfree_skb(skb);
@@@ -1536,7 -1529,7 +1536,7 @@@ static int ipgre_newlink(struct net *sr
                return -EEXIST;
  
        if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
 -              random_ether_addr(dev->dev_addr);
 +              eth_hw_addr_random(dev);
  
        mtu = ipgre_tunnel_bind_dev(dev);
        if (!tb[IFLA_MTU])
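
The IPv6 branch of ipgre_tunnel_xmit() follows the same referenced-lookup discipline as clip.c: because dst_neigh_lookup() pins the neighbour, the code records whether an ICMP error is wanted in do_tx_error_icmp, releases the neighbour, and only then jumps. A hypothetical condensation of that logic (sketch only; the function name and error codes are assumptions):

#include <linux/errno.h>
#include <net/dst.h>
#include <net/ipv6.h>
#include <net/neighbour.h>

/* hypothetical: extract an IPv4-compatible destination, if any */
static int example_pick_dst(struct sk_buff *skb, __be32 *dst)
{
	const struct in6_addr *addr6;
	struct neighbour *neigh;
	bool do_tx_error_icmp;

	neigh = dst_neigh_lookup(skb_dst(skb), &ipv6_hdr(skb)->daddr);
	if (!neigh)
		return -ENOENT;

	addr6 = (const struct in6_addr *)&neigh->primary_key;
	if ((ipv6_addr_type(addr6) & IPV6_ADDR_COMPATv4) == 0) {
		do_tx_error_icmp = true;
	} else {
		do_tx_error_icmp = false;
		*dst = addr6->s6_addr32[3];
	}
	neigh_release(neigh);	/* drop the reference before any jump */

	return do_tx_error_icmp ? -EHOSTUNREACH : 0;
}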
diff --combined net/ipv4/ping.c
index cfc82cf339f6e387bc0abcb66b4f23f6411c7464,b072386cee218ba508d572d049b2371ba9bf370e..4398a45a9600e61743312ede9fbecb56796173da
@@@ -556,8 -556,7 +556,8 @@@ static int ping_sendmsg(struct kiocb *i
                        ipc.oif = inet->mc_index;
                if (!saddr)
                        saddr = inet->mc_addr;
 -      }
 +      } else if (!ipc.oif)
 +              ipc.oif = inet->uc_index;
  
        flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
                           RT_SCOPE_UNIVERSE, sk->sk_protocol,
@@@ -631,6 -630,7 +631,7 @@@ static int ping_recvmsg(struct kiocb *i
  
        pr_debug("ping_recvmsg(sk=%p,sk->num=%u)\n", isk, isk->inet_num);
  
+       err = -EOPNOTSUPP;
        if (flags & MSG_OOB)
                goto out;
  
diff --combined net/ipv6/ndisc.c
index 8d817018c188f2bb2c48d690f9a8169a7133a34b,c964958ac470f65408c09642814a532205fa497c..3dcdb81ec3e8abdb934627cd243aed0e5ce3b5a3
@@@ -1223,17 -1223,11 +1223,17 @@@ static void ndisc_router_discovery(stru
  
        rt = rt6_get_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev);
  
 -      if (rt)
 -              neigh = dst_get_neighbour_noref(&rt->dst);
 -
 +      if (rt) {
 +              neigh = dst_neigh_lookup(&rt->dst, &ipv6_hdr(skb)->saddr);
 +              if (!neigh) {
 +                      ND_PRINTK0(KERN_ERR
 +                                 "ICMPv6 RA: %s() got default router without neighbour.\n",
 +                                 __func__);
 +                      dst_release(&rt->dst);
 +                      return;
 +              }
 +      }
        if (rt && lifetime == 0) {
 -              neigh_clone(neigh);
                ip6_del_rt(rt);
                rt = NULL;
        }
                        return;
                }
  
 -              neigh = dst_get_neighbour_noref(&rt->dst);
 +              neigh = dst_neigh_lookup(&rt->dst, &ipv6_hdr(skb)->saddr);
                if (neigh == NULL) {
                        ND_PRINTK0(KERN_ERR
                                   "ICMPv6 RA: %s() got default router without neighbour.\n",
@@@ -1417,7 -1411,7 +1417,7 @@@ skip_routeinfo
  out:
        if (rt)
                dst_release(&rt->dst);
 -      else if (neigh)
 +      if (neigh)
                neigh_release(neigh);
  }
  
@@@ -1512,7 -1506,8 +1512,7 @@@ static void ndisc_redirect_rcv(struct s
        }
  }
  
 -void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
 -                       const struct in6_addr *target)
 +void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
  {
        struct net_device *dev = skb->dev;
        struct net *net = dev_net(dev);
                         &saddr_buf, &ipv6_hdr(skb)->saddr, dev->ifindex);
  
        dst = ip6_route_output(net, NULL, &fl6);
-       if (dst == NULL)
+       if (dst->error) {
+               dst_release(dst);
                return;
+       }
        dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
        if (IS_ERR(dst))
                return;
                goto release;
  
        if (dev->addr_len) {
 +              struct neighbour *neigh = dst_neigh_lookup(skb_dst(skb), target);
 +              if (!neigh) {
 +                      ND_PRINTK2(KERN_WARNING
 +                                 "ICMPv6 Redirect: no neigh for target address\n");
 +                      goto release;
 +              }
 +
                read_lock_bh(&neigh->lock);
                if (neigh->nud_state & NUD_VALID) {
                        memcpy(ha_buf, neigh->ha, dev->addr_len);
                        len += ndisc_opt_addr_space(dev);
                } else
                        read_unlock_bh(&neigh->lock);
 +
 +              neigh_release(neigh);
        }
  
        rd_len = min_t(unsigned int,
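
Two conventions drive the ndisc.c hunks: ip6_route_output() never returns NULL, so failure is detected through dst->error (with the reference released), and neighbours obtained via dst_neigh_lookup() must be released on every path, which is why the tail now releases rt and neigh independently. A hypothetical wrapper for the first convention (sketch only):

#include <net/dst.h>
#include <net/ip6_route.h>

/* hypothetical: NULL-or-valid wrapper around ip6_route_output() */
static struct dst_entry *example_route_output(struct net *net,
					      struct flowi6 *fl6)
{
	struct dst_entry *dst = ip6_route_output(net, NULL, fl6);

	if (dst->error) {	/* an error route, never a NULL pointer */
		dst_release(dst);
		return NULL;
	}
	return dst;
}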
diff --combined net/netfilter/nf_conntrack_netlink.c
index 28d0312d890af22ff0ae0d229e0dbd6fc618e0be,30c9d4ca02180e050d3b98c8480b83dfc71047b9..04fb409623d222182828b54cd675a7c67e77bc1e
@@@ -691,18 -691,9 +691,18 @@@ static int ctnetlink_done(struct netlin
  {
        if (cb->args[1])
                nf_ct_put((struct nf_conn *)cb->args[1]);
 +      if (cb->data)
 +              kfree(cb->data);
        return 0;
  }
  
 +struct ctnetlink_dump_filter {
 +      struct {
 +              u_int32_t val;
 +              u_int32_t mask;
 +      } mark;
 +};
 +
  static int
  ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
  {
        struct hlist_nulls_node *n;
        struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
        u_int8_t l3proto = nfmsg->nfgen_family;
 -
 +#ifdef CONFIG_NF_CONNTRACK_MARK
 +      const struct ctnetlink_dump_filter *filter = cb->data;
 +#endif
        spin_lock_bh(&nf_conntrack_lock);
        last = (struct nf_conn *)cb->args[1];
        for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
@@@ -734,12 -723,6 +734,12 @@@ restart
                                        continue;
                                cb->args[1] = 0;
                        }
 +#ifdef CONFIG_NF_CONNTRACK_MARK
 +                      if (filter && !((ct->mark & filter->mark.mask) ==
 +                                      filter->mark.val)) {
 +                              continue;
 +                      }
 +#endif
                        if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
                                                cb->nlh->nlmsg_seq,
                                                NFNL_MSG_TYPE(
@@@ -911,7 -894,6 +911,7 @@@ static const struct nla_policy ct_nla_p
        [CTA_NAT_DST]           = { .type = NLA_NESTED },
        [CTA_TUPLE_MASTER]      = { .type = NLA_NESTED },
        [CTA_ZONE]              = { .type = NLA_U16 },
 +      [CTA_MARK_MASK]         = { .type = NLA_U32 },
  };
  
  static int
@@@ -995,28 -977,9 +995,28 @@@ ctnetlink_get_conntrack(struct sock *ct
        u16 zone;
        int err;
  
 -      if (nlh->nlmsg_flags & NLM_F_DUMP)
 -              return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table,
 -                                        ctnetlink_done, 0);
 +      if (nlh->nlmsg_flags & NLM_F_DUMP) {
 +              struct netlink_dump_control c = {
 +                      .dump = ctnetlink_dump_table,
 +                      .done = ctnetlink_done,
 +              };
 +#ifdef CONFIG_NF_CONNTRACK_MARK
 +              if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
 +                      struct ctnetlink_dump_filter *filter;
 +
 +                      filter = kzalloc(sizeof(struct ctnetlink_dump_filter),
 +                                       GFP_ATOMIC);
 +                      if (filter == NULL)
 +                              return -ENOMEM;
 +
 +                      filter->mark.val = ntohl(nla_get_be32(cda[CTA_MARK]));
 +                      filter->mark.mask =
 +                              ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
 +                      c.data = filter;
 +              }
 +#endif
 +              return netlink_dump_start(ctnl, skb, nlh, &c);
 +      }
  
        err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
        if (err < 0)
@@@ -1404,15 -1367,12 +1404,12 @@@ ctnetlink_create_conntrack(struct net *
                                                    nf_ct_protonum(ct));
                if (helper == NULL) {
                        rcu_read_unlock();
-                       spin_unlock_bh(&nf_conntrack_lock);
  #ifdef CONFIG_MODULES
                        if (request_module("nfct-helper-%s", helpname) < 0) {
-                               spin_lock_bh(&nf_conntrack_lock);
                                err = -EOPNOTSUPP;
                                goto err1;
                        }
  
-                       spin_lock_bh(&nf_conntrack_lock);
                        rcu_read_lock();
                        helper = __nf_conntrack_helper_find(helpname,
                                                            nf_ct_l3num(ct),
        if (tstamp)
                tstamp->start = ktime_to_ns(ktime_get_real());
  
-       add_timer(&ct->timeout);
-       nf_conntrack_hash_insert(ct);
+       err = nf_conntrack_hash_check_insert(ct);
+       if (err < 0)
+               goto err2;
        rcu_read_unlock();
  
        return ct;
@@@ -1527,6 -1489,7 +1526,7 @@@ ctnetlink_new_conntrack(struct sock *ct
        struct nf_conntrack_tuple otuple, rtuple;
        struct nf_conntrack_tuple_hash *h = NULL;
        struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+       struct nf_conn *ct;
        u_int8_t u3 = nfmsg->nfgen_family;
        u16 zone;
        int err;
                        return err;
        }
  
-       spin_lock_bh(&nf_conntrack_lock);
        if (cda[CTA_TUPLE_ORIG])
-               h = __nf_conntrack_find(net, zone, &otuple);
+               h = nf_conntrack_find_get(net, zone, &otuple);
        else if (cda[CTA_TUPLE_REPLY])
-               h = __nf_conntrack_find(net, zone, &rtuple);
+               h = nf_conntrack_find_get(net, zone, &rtuple);
  
        if (h == NULL) {
                err = -ENOENT;
                if (nlh->nlmsg_flags & NLM_F_CREATE) {
-                       struct nf_conn *ct;
                        enum ip_conntrack_events events;
  
                        ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
                                                        &rtuple, u3);
-                       if (IS_ERR(ct)) {
-                               err = PTR_ERR(ct);
-                               goto out_unlock;
-                       }
+                       if (IS_ERR(ct))
+                               return PTR_ERR(ct);
                        err = 0;
-                       nf_conntrack_get(&ct->ct_general);
-                       spin_unlock_bh(&nf_conntrack_lock);
                        if (test_bit(IPS_EXPECTED_BIT, &ct->status))
                                events = IPCT_RELATED;
                        else
                                                      ct, NETLINK_CB(skb).pid,
                                                      nlmsg_report(nlh));
                        nf_ct_put(ct);
-               } else
-                       spin_unlock_bh(&nf_conntrack_lock);
+               }
  
                return err;
        }
        /* implicit 'else' */
  
-       /* We manipulate the conntrack inside the global conntrack table lock,
-        * so there's no need to increase the refcount */
        err = -EEXIST;
+       ct = nf_ct_tuplehash_to_ctrack(h);
        if (!(nlh->nlmsg_flags & NLM_F_EXCL)) {
-               struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
+               spin_lock_bh(&nf_conntrack_lock);
                err = ctnetlink_change_conntrack(ct, cda);
+               spin_unlock_bh(&nf_conntrack_lock);
                if (err == 0) {
-                       nf_conntrack_get(&ct->ct_general);
-                       spin_unlock_bh(&nf_conntrack_lock);
                        nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
                                                      (1 << IPCT_ASSURED) |
                                                      (1 << IPCT_HELPER) |
                                                      (1 << IPCT_MARK),
                                                      ct, NETLINK_CB(skb).pid,
                                                      nlmsg_report(nlh));
-                       nf_ct_put(ct);
-               } else
-                       spin_unlock_bh(&nf_conntrack_lock);
-               return err;
+               }
        }
  
- out_unlock:
-       spin_unlock_bh(&nf_conntrack_lock);
+       nf_ct_put(ct);
        return err;
  }
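
Both ctnetlink dump starts in this file now go through struct netlink_dump_control, which replaces the positional netlink_dump_start() arguments and adds a .data cookie; ctnetlink uses the cookie to carry the kzalloc'd mark filter, which ctnetlink_done() then kfrees. A hypothetical minimal caller (sketch only; it assumes the static dump callbacks are in scope):

#include <linux/netlink.h>

static int example_start_dump(struct sock *ctnl, struct sk_buff *skb,
			      const struct nlmsghdr *nlh, void *filter)
{
	struct netlink_dump_control c = {
		.dump = ctnetlink_dump_table,	/* per-bucket dumper */
		.done = ctnetlink_done,		/* kfrees c.data */
		.data = filter,			/* per-dump state */
	};

	return netlink_dump_start(ctnl, skb, nlh, &c);
}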
  
@@@ -1887,11 -1836,9 +1873,11 @@@ ctnetlink_get_expect(struct sock *ctnl
        int err;
  
        if (nlh->nlmsg_flags & NLM_F_DUMP) {
 -              return netlink_dump_start(ctnl, skb, nlh,
 -                                        ctnetlink_exp_dump_table,
 -                                        ctnetlink_exp_done, 0);
 +              struct netlink_dump_control c = {
 +                      .dump = ctnetlink_exp_dump_table,
 +                      .done = ctnetlink_exp_done,
 +              };
 +              return netlink_dump_start(ctnl, skb, nlh, &c);
        }
  
        err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);