Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author David S. Miller <davem@davemloft.net>
Tue, 5 Feb 2013 19:12:20 +0000 (14:12 -0500)
committer David S. Miller <davem@davemloft.net>
Tue, 5 Feb 2013 19:12:20 +0000 (14:12 -0500)
Conflicts:
drivers/net/ethernet/intel/e1000e/ethtool.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/wireless/iwlwifi/dvm/tx.c
net/ipv6/route.c

The ipv6 route.c conflict is simple: just ignore the 'net' side change,
as we fixed the same problem in 'net-next' by eliminating cached
neighbours from ipv6 routes.
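
Purely as an illustration (this is not the resolved route.c hunk, just
the generic dst neighbour API), eliminating the cached neighbour means
the route no longer carries a neighbour pointer; callers look one up on
demand and release it when done:

    /* hypothetical sketch, not the actual route.c code */
    struct neighbour *n = dst_neigh_lookup(&rt->dst,
                                           &ipv6_hdr(skb)->daddr);
    if (n) {
            /* ... use the neighbour ... */
            neigh_release(n);
    }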

The e1000e conflict is trivial: the addition of a new statistic in the
ethtool code.
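
As a generic illustration of what adding a statistic to a driver's
ethtool code looks like (the struct, macro and statistic names below
are invented, not the real e1000e ones):

    static const struct drv_stat drv_gstrings_stats[] = {
            DRV_STAT("rx_packets"),
            DRV_STAT("tx_packets"),
            DRV_STAT("stat_added_in_net"),       /* new in 'net'      */
            DRV_STAT("stat_added_in_net_next"),  /* new in 'net-next' */
    };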

The vmxnet3 conflict is between a change in 'net' that removes a
guarding conditional and a netdev_info() conversion in 'net-next'.
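
The merged result, as the vmxnet3 hunk below shows, keeps both sides,
i.e. the netdev_info() message from 'net-next' plus the unconditional
carrier update from 'net', roughly:

    if (ret & 1) {  /* Link is up. */
            netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
                        adapter->link_speed);
            netif_carrier_on(adapter->netdev);
            /* ... wake the tx queues ... */
    }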

The iwlwifi conflict deals with a WARN_ON() conversion in 'net-next'
vs. a revert in 'net'.
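
Concretely, the 'net-next' side kept by the merge turns the old error
message

    if (!is_agg && freed != 1)
            IWL_ERR(priv, "Q: %d, freed %d\n", txq_id, freed);

into a warning, as the iwlwifi hunk below shows:

    WARN_ON(!is_agg && freed != 1);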

Signed-off-by: David S. Miller <davem@davemloft.net>
38 files changed:
drivers/bcma/driver_chipcommon_nflash.c
drivers/net/bonding/bond_sysfs.c
drivers/net/can/c_can/c_can.c
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/via/via-rhine.c
drivers/net/tun.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/usbnet.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/wireless/brcm80211/brcmsmac/main.c
drivers/net/wireless/iwlwifi/dvm/tx.c
drivers/net/wireless/mwifiex/scan.c
drivers/net/wireless/rtlwifi/usb.c
drivers/vhost/net.c
drivers/vhost/vhost.c
drivers/vhost/vhost.h
include/linux/usb/usbnet.h
include/net/transp_v6.h
net/core/pktgen.c
net/core/skbuff.c
net/ipv4/tcp_cong.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv6/addrconf.c
net/ipv6/datagram.c
net/ipv6/ip6_flowlabel.c
net/ipv6/ipv6_sockglue.c
net/ipv6/raw.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_core.h
net/l2tp/l2tp_ip6.c
net/packet/af_packet.c
net/sched/sch_netem.c
net/sunrpc/svcsock.c
net/wireless/scan.c

index dbda91e4dff5189ccabbb710b7a2a11f0e099651..1f0b83e18f6827f5003669b4015c2a89b2d369c5 100644 (file)
@@ -21,7 +21,7 @@ int bcma_nflash_init(struct bcma_drv_cc *cc)
        struct bcma_bus *bus = cc->core->bus;
 
        if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4706 &&
-           cc->core->id.rev != 0x38) {
+           cc->core->id.rev != 38) {
                bcma_err(bus, "NAND flash on unsupported board!\n");
                return -ENOTSUPP;
        }
index 1877ed7ca0864ed4adfb614de862ed40dde730bd..1c9e09fbdff8346e99413e0313d03ed401f463e2 100644 (file)
@@ -1053,6 +1053,7 @@ static ssize_t bonding_store_primary(struct device *d,
                pr_info("%s: Setting primary slave to None.\n",
                        bond->dev->name);
                bond->primary_slave = NULL;
+               memset(bond->params.primary, 0, sizeof(bond->params.primary));
                bond_select_active_slave(bond);
                goto out;
        }
index 285f763e3cd13ca8dbdc2329283e850ebe6cdae5..a668cd491cb3ae427e38db535df7a8c9dc1109e1 100644 (file)
@@ -491,8 +491,12 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
 
        priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface),
                        IFX_WRITE_LOW_16BIT(mask));
+
+       /* According to C_CAN documentation, the reserved bit
+        * in IFx_MASK2 register is fixed 1
+        */
        priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface),
-                       IFX_WRITE_HIGH_16BIT(mask));
+                       IFX_WRITE_HIGH_16BIT(mask) | BIT(13));
 
        priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
                        IFX_WRITE_LOW_16BIT(id));
index 4010cb71bddbe3039ab234ccaa0b2d47e123371e..28ceb84141851e26e8d49f1175099235a512d54b 100644 (file)
 
 #define DRV_VER                        "4.6.62.0u"
 #define DRV_NAME               "be2net"
-#define BE_NAME                        "ServerEngines BladeEngine2 10Gbps NIC"
-#define BE3_NAME               "ServerEngines BladeEngine3 10Gbps NIC"
-#define OC_NAME                        "Emulex OneConnect 10Gbps NIC"
+#define BE_NAME                        "Emulex BladeEngine2"
+#define BE3_NAME               "Emulex BladeEngine3"
+#define OC_NAME                        "Emulex OneConnect"
 #define OC_NAME_BE             OC_NAME "(be3)"
 #define OC_NAME_LANCER         OC_NAME "(Lancer)"
 #define OC_NAME_SH             OC_NAME "(Skyhawk)"
-#define DRV_DESC               "ServerEngines BladeEngine 10Gbps NIC Driver"
+#define DRV_DESC               "Emulex OneConnect 10Gbps NIC Driver"
 
 #define BE_VENDOR_ID           0x19a2
 #define EMULEX_VENDOR_ID       0x10df
index 7d534818d2fb7fc6e8b1196df97b043c9fb9de14..3860888ac711a7fe7630cad2c06bb3819eb1f3a0 100644 (file)
@@ -25,7 +25,7 @@
 MODULE_VERSION(DRV_VER);
 MODULE_DEVICE_TABLE(pci, be_dev_ids);
 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
-MODULE_AUTHOR("ServerEngines Corporation");
+MODULE_AUTHOR("Emulex Corporation");
 MODULE_LICENSE("GPL");
 
 static unsigned int num_vfs;
index ec4a5e1c6fb2c5c791b89f03eddf106430f3b5b2..185c721c52d7ba544e613020dcf619cede479e94 100644 (file)
@@ -1812,7 +1812,7 @@ static void rhine_tx(struct net_device *dev)
                                         rp->tx_skbuff[entry]->len,
                                         PCI_DMA_TODEVICE);
                }
-               dev_kfree_skb_irq(rp->tx_skbuff[entry]);
+               dev_kfree_skb(rp->tx_skbuff[entry]);
                rp->tx_skbuff[entry] = NULL;
                entry = (++rp->dirty_tx) % TX_RING_SIZE;
        }
@@ -2024,11 +2024,7 @@ static void rhine_slow_event_task(struct work_struct *work)
        if (intr_status & IntrPCIErr)
                netif_warn(rp, hw, dev, "PCI error\n");
 
-       napi_disable(&rp->napi);
-       rhine_irq_disable(rp);
-       /* Slow and safe. Consider __napi_schedule as a replacement ? */
-       napi_enable(&rp->napi);
-       napi_schedule(&rp->napi);
+       iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
 
 out_unlock:
        mutex_unlock(&rp->task_lock);
index 8d208dd929634c373794c62f001ccbc766b1e241..b1038c0e2240d9ef8d6697b01c2b70b61e6128cb 100644 (file)
@@ -298,11 +298,12 @@ static void tun_flow_cleanup(unsigned long data)
 }
 
 static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
-                           u16 queue_index)
+                           struct tun_file *tfile)
 {
        struct hlist_head *head;
        struct tun_flow_entry *e;
        unsigned long delay = tun->ageing_time;
+       u16 queue_index = tfile->queue_index;
 
        if (!rxhash)
                return;
@@ -311,7 +312,9 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
 
        rcu_read_lock();
 
-       if (tun->numqueues == 1)
+       /* We may get a very small possibility of OOO during switching, not
+        * worth to optimize.*/
+       if (tun->numqueues == 1 || tfile->detached)
                goto unlock;
 
        e = tun_flow_find(head, rxhash);
@@ -411,21 +414,21 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
 
        tun = rtnl_dereference(tfile->tun);
 
-       if (tun) {
+       if (tun && !tfile->detached) {
                u16 index = tfile->queue_index;
                BUG_ON(index >= tun->numqueues);
                dev = tun->dev;
 
                rcu_assign_pointer(tun->tfiles[index],
                                   tun->tfiles[tun->numqueues - 1]);
-               rcu_assign_pointer(tfile->tun, NULL);
                ntfile = rtnl_dereference(tun->tfiles[index]);
                ntfile->queue_index = index;
 
                --tun->numqueues;
-               if (clean)
+               if (clean) {
+                       rcu_assign_pointer(tfile->tun, NULL);
                        sock_put(&tfile->sk);
-               else
+               } else
                        tun_disable_queue(tun, tfile);
 
                synchronize_net();
@@ -439,10 +442,13 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
        }
 
        if (clean) {
-               if (tun && tun->numqueues == 0 && tun->numdisabled == 0 &&
-                   !(tun->flags & TUN_PERSIST))
-                       if (tun->dev->reg_state == NETREG_REGISTERED)
+               if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
+                       netif_carrier_off(tun->dev);
+
+                       if (!(tun->flags & TUN_PERSIST) &&
+                           tun->dev->reg_state == NETREG_REGISTERED)
                                unregister_netdevice(tun->dev);
+               }
 
                BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,
                                 &tfile->socket.flags));
@@ -470,6 +476,10 @@ static void tun_detach_all(struct net_device *dev)
                rcu_assign_pointer(tfile->tun, NULL);
                --tun->numqueues;
        }
+       list_for_each_entry(tfile, &tun->disabled, next) {
+               wake_up_all(&tfile->wq.wait);
+               rcu_assign_pointer(tfile->tun, NULL);
+       }
        BUG_ON(tun->numqueues != 0);
 
        synchronize_net();
@@ -500,7 +510,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
                goto out;
 
        err = -EINVAL;
-       if (rtnl_dereference(tfile->tun))
+       if (rtnl_dereference(tfile->tun) && !tfile->detached)
                goto out;
 
        err = -EBUSY;
@@ -1203,7 +1213,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
        tun->dev->stats.rx_packets++;
        tun->dev->stats.rx_bytes += len;
 
-       tun_flow_update(tun, rxhash, tfile->queue_index);
+       tun_flow_update(tun, rxhash, tfile);
        return total_len;
 }
 
@@ -1662,10 +1672,10 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                    device_create_file(&tun->dev->dev, &dev_attr_owner) ||
                    device_create_file(&tun->dev->dev, &dev_attr_group))
                        pr_err("Failed to create tun sysfs files\n");
-
-               netif_carrier_on(tun->dev);
        }
 
+       netif_carrier_on(tun->dev);
+
        tun_debug(KERN_INFO, tun, "tun_set_iff\n");
 
        if (ifr->ifr_flags & IFF_NO_PI)
@@ -1817,7 +1827,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
                ret = tun_attach(tun, file);
        } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
                tun = rtnl_dereference(tfile->tun);
-               if (!tun || !(tun->flags & TUN_TAP_MQ))
+               if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached)
                        ret = -EINVAL;
                else
                        __tun_detach(tfile, false);
index 575a5839ee3458434d355677d785589d3472395d..2ca7f8ea2dcae4d0e1449203dcd015511e0a880b 100644 (file)
@@ -461,6 +461,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1199, 0x901c, 8)},    /* Sierra Wireless EM7700 */
        {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},    /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
        {QMI_FIXED_INTF(0x2357, 0x0201, 4)},    /* TP-LINK HSUPA Modem MA180 */
+       {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},    /* Telit LE920 */
 
        /* 4. Gobi 1000 devices */
        {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},    /* Acer Gobi Modem Device */
index 30c1b330e983a2ae163f9965c4aa73a416179468..51f3192f3931e0a179dcb619e4e714a0565656fc 100644 (file)
@@ -380,6 +380,12 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
        unsigned long           lockflags;
        size_t                  size = dev->rx_urb_size;
 
+       /* prevent rx skb allocation when error ratio is high */
+       if (test_bit(EVENT_RX_KILL, &dev->flags)) {
+               usb_free_urb(urb);
+               return -ENOLINK;
+       }
+
        skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
        if (!skb) {
                netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
@@ -539,6 +545,17 @@ block:
                break;
        }
 
+       /* stop rx if packet error rate is high */
+       if (++dev->pkt_cnt > 30) {
+               dev->pkt_cnt = 0;
+               dev->pkt_err = 0;
+       } else {
+               if (state == rx_cleanup)
+                       dev->pkt_err++;
+               if (dev->pkt_err > 20)
+                       set_bit(EVENT_RX_KILL, &dev->flags);
+       }
+
        state = defer_bh(dev, skb, &dev->rxq, state);
 
        if (urb) {
@@ -791,6 +808,11 @@ int usbnet_open (struct net_device *net)
                   (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
                   "simple");
 
+       /* reset rx error state */
+       dev->pkt_cnt = 0;
+       dev->pkt_err = 0;
+       clear_bit(EVENT_RX_KILL, &dev->flags);
+
        // delay posting reads until we're fully open
        tasklet_schedule (&dev->bh);
        if (info->manage_power) {
@@ -1103,13 +1125,11 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
        if (info->tx_fixup) {
                skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
                if (!skb) {
-                       if (netif_msg_tx_err(dev)) {
-                               netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
-                               goto drop;
-                       } else {
-                               /* cdc_ncm collected packet; waits for more */
+                       /* packet collected; minidriver waiting for more */
+                       if (info->flags & FLAG_MULTI_PACKET)
                                goto not_drop;
-                       }
+                       netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
+                       goto drop;
                }
        }
        length = skb->len;
@@ -1254,6 +1274,9 @@ static void usbnet_bh (unsigned long param)
                }
        }
 
+       /* restart RX again after disabling due to high error rate */
+       clear_bit(EVENT_RX_KILL, &dev->flags);
+
        // waiting for all pending urbs to complete?
        if (dev->wait) {
                if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
index b1c90f8ccd3d39a4dfb806c428cd8e665d2f0a82..ffb97b2a15a036e48989e4addcb9644fe22544b7 100644 (file)
@@ -150,8 +150,7 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
        if (ret & 1) { /* Link is up. */
                netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
                            adapter->link_speed);
-               if (!netif_carrier_ok(adapter->netdev))
-                       netif_carrier_on(adapter->netdev);
+               netif_carrier_on(adapter->netdev);
 
                if (affectTxQueue) {
                        for (i = 0; i < adapter->num_tx_queues; i++)
@@ -160,8 +159,7 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
                }
        } else {
                netdev_info(adapter->netdev, "NIC Link is Down\n");
-               if (netif_carrier_ok(adapter->netdev))
-                       netif_carrier_off(adapter->netdev);
+               netif_carrier_off(adapter->netdev);
 
                if (affectTxQueue) {
                        for (i = 0; i < adapter->num_tx_queues; i++)
@@ -3060,6 +3058,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
        netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
        netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
 
+       netif_carrier_off(netdev);
        err = register_netdev(netdev);
 
        if (err) {
index c26992a60e6c17ff2cbe0c84ed63527ab9bb1198..8d560b64516d2bb11a3c4fb0e75b80c71edf27bf 100644 (file)
@@ -1027,7 +1027,6 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
 static bool
 brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
 {
-       bool morepending = false;
        struct bcma_device *core;
        struct tx_status txstatus, *txs;
        u32 s1, s2;
@@ -1041,23 +1040,20 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
        txs = &txstatus;
        core = wlc_hw->d11core;
        *fatal = false;
-       s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
-       while (!(*fatal)
-              && (s1 & TXS_V)) {
-               /* !give others some time to run! */
-               if (n >= max_tx_num) {
-                       morepending = true;
-                       break;
-               }
 
+       while (n < max_tx_num) {
+               s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
                if (s1 == 0xffffffff) {
                        brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit,
                                  __func__);
                        *fatal = true;
                        return false;
                }
-               s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
+               /* only process when valid */
+               if (!(s1 & TXS_V))
+                       break;
 
+               s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
                txs->status = s1 & TXS_STATUS_MASK;
                txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT;
                txs->sequence = s2 & TXS_SEQ_MASK;
@@ -1065,15 +1061,12 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
                txs->lasttxtime = 0;
 
                *fatal = brcms_c_dotxstatus(wlc_hw->wlc, txs);
-
-               s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
+               if (*fatal == true)
+                       return false;
                n++;
        }
 
-       if (*fatal)
-               return false;
-
-       return morepending;
+       return n >= max_tx_num;
 }
 
 static void brcms_c_tbtt(struct brcms_c_info *wlc)
index 6b01fc1959406016d761699bd298c5628719d174..191b9d4bee472cfdf93de170700a93f77be3eae5 100644 (file)
@@ -1145,6 +1145,13 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
                        next_reclaimed = ssn;
                }
 
+               if (tid != IWL_TID_NON_QOS) {
+                       priv->tid_data[sta_id][tid].next_reclaimed =
+                               next_reclaimed;
+                       IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
+                                                 next_reclaimed);
+               }
+
                iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
 
                iwlagn_check_ratid_empty(priv, sta_id, tid);
@@ -1195,30 +1202,12 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
                        if (!is_agg)
                                iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);
 
-                       /*
-                        * W/A for FW bug - the seq_ctl isn't updated when the
-                        * queues are flushed. Fetch it from the packet itself
-                        */
-                       if (!is_agg && status == TX_STATUS_FAIL_FIFO_FLUSHED) {
-                               next_reclaimed = le16_to_cpu(hdr->seq_ctrl);
-                               next_reclaimed =
-                                       SEQ_TO_SN(next_reclaimed + 0x10);
-                       }
-
                        is_offchannel_skb =
                                (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN);
                        freed++;
                }
 
-               if (tid != IWL_TID_NON_QOS) {
-                       priv->tid_data[sta_id][tid].next_reclaimed =
-                               next_reclaimed;
-                       IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
-                                          next_reclaimed);
-               }
-
-               if (!is_agg && freed != 1)
-                       IWL_ERR(priv, "Q: %d, freed %d\n", txq_id, freed);
+               WARN_ON(!is_agg && freed != 1);
 
                /*
                 * An offchannel frame can be send only on the AUX queue, where
index f0de40166dc30395f580871b2d2e1f0c9a21e985..d41f0e6472808fe822cbdee97dc80947ca0e040f 100644 (file)
@@ -1557,7 +1557,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
                dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n",
                        scan_rsp->number_of_sets);
                ret = -1;
-               goto done;
+               goto check_next_scan;
        }
 
        bytes_left = le16_to_cpu(scan_rsp->bss_descript_size);
@@ -1628,7 +1628,8 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
                if (!beacon_size || beacon_size > bytes_left) {
                        bss_info += bytes_left;
                        bytes_left = 0;
-                       return -1;
+                       ret = -1;
+                       goto check_next_scan;
                }
 
                /* Initialize the current working beacon pointer for this BSS
@@ -1684,7 +1685,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
                                dev_err(priv->adapter->dev,
                                        "%s: bytes left < IE length\n",
                                        __func__);
-                               goto done;
+                               goto check_next_scan;
                        }
                        if (element_id == WLAN_EID_DS_PARAMS) {
                                channel = *(current_ptr + sizeof(struct ieee_types_header));
@@ -1747,6 +1748,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
                }
        }
 
+check_next_scan:
        spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
        if (list_empty(&adapter->scan_pending_q)) {
                spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
@@ -1807,7 +1809,6 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
                }
        }
 
-done:
        return ret;
 }
 
index f2ecdeb3a90d441ee809af029a1db1c8dab80f48..1535efda3d525a0dfafe54bb4442aacec4b00446 100644 (file)
@@ -542,8 +542,8 @@ static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
        WARN_ON(skb_queue_empty(&rx_queue));
        while (!skb_queue_empty(&rx_queue)) {
                _skb = skb_dequeue(&rx_queue);
-               _rtl_usb_rx_process_agg(hw, skb);
-               ieee80211_rx_irqsafe(hw, skb);
+               _rtl_usb_rx_process_agg(hw, _skb);
+               ieee80211_rx_irqsafe(hw, _skb);
        }
 }
 
index ebd08b21b23432696047487367d44f76d4fa4f02..959b1cd89e6a5be5a402a79089077609f8e30716 100644 (file)
@@ -165,12 +165,16 @@ static void tx_poll_stop(struct vhost_net *net)
 }
 
 /* Caller must have TX VQ lock */
-static void tx_poll_start(struct vhost_net *net, struct socket *sock)
+static int tx_poll_start(struct vhost_net *net, struct socket *sock)
 {
+       int ret;
+
        if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED))
-               return;
-       vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);
-       net->tx_poll_state = VHOST_NET_POLL_STARTED;
+               return 0;
+       ret = vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);
+       if (!ret)
+               net->tx_poll_state = VHOST_NET_POLL_STARTED;
+       return ret;
 }
 
 /* In case of DMA done not in order in lower device driver for some reason.
@@ -642,20 +646,23 @@ static void vhost_net_disable_vq(struct vhost_net *n,
                vhost_poll_stop(n->poll + VHOST_NET_VQ_RX);
 }
 
-static void vhost_net_enable_vq(struct vhost_net *n,
+static int vhost_net_enable_vq(struct vhost_net *n,
                                struct vhost_virtqueue *vq)
 {
        struct socket *sock;
+       int ret;
 
        sock = rcu_dereference_protected(vq->private_data,
                                         lockdep_is_held(&vq->mutex));
        if (!sock)
-               return;
+               return 0;
        if (vq == n->vqs + VHOST_NET_VQ_TX) {
                n->tx_poll_state = VHOST_NET_POLL_STOPPED;
-               tx_poll_start(n, sock);
+               ret = tx_poll_start(n, sock);
        } else
-               vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file);
+               ret = vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file);
+
+       return ret;
 }
 
 static struct socket *vhost_net_stop_vq(struct vhost_net *n,
@@ -827,15 +834,18 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
                        r = PTR_ERR(ubufs);
                        goto err_ubufs;
                }
-               oldubufs = vq->ubufs;
-               vq->ubufs = ubufs;
+
                vhost_net_disable_vq(n, vq);
                rcu_assign_pointer(vq->private_data, sock);
-               vhost_net_enable_vq(n, vq);
-
                r = vhost_init_used(vq);
                if (r)
-                       goto err_vq;
+                       goto err_used;
+               r = vhost_net_enable_vq(n, vq);
+               if (r)
+                       goto err_used;
+
+               oldubufs = vq->ubufs;
+               vq->ubufs = ubufs;
 
                n->tx_packets = 0;
                n->tx_zcopy_err = 0;
@@ -859,6 +869,11 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
        mutex_unlock(&n->dev.mutex);
        return 0;
 
+err_used:
+       rcu_assign_pointer(vq->private_data, oldsock);
+       vhost_net_enable_vq(n, vq);
+       if (ubufs)
+               vhost_ubuf_put_and_wait(ubufs);
 err_ubufs:
        fput(sock->file);
 err_vq:
index 34389f75fe65693a4ad5baa36715159fd5759bbb..9759249e6d908867cf72f53e5bb8e4ecc30db8ae 100644 (file)
@@ -77,26 +77,38 @@ void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
        init_poll_funcptr(&poll->table, vhost_poll_func);
        poll->mask = mask;
        poll->dev = dev;
+       poll->wqh = NULL;
 
        vhost_work_init(&poll->work, fn);
 }
 
 /* Start polling a file. We add ourselves to file's wait queue. The caller must
  * keep a reference to a file until after vhost_poll_stop is called. */
-void vhost_poll_start(struct vhost_poll *poll, struct file *file)
+int vhost_poll_start(struct vhost_poll *poll, struct file *file)
 {
        unsigned long mask;
+       int ret = 0;
 
        mask = file->f_op->poll(file, &poll->table);
        if (mask)
                vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
+       if (mask & POLLERR) {
+               if (poll->wqh)
+                       remove_wait_queue(poll->wqh, &poll->wait);
+               ret = -EINVAL;
+       }
+
+       return ret;
 }
 
 /* Stop polling a file. After this function returns, it becomes safe to drop the
  * file reference. You must also flush afterwards. */
 void vhost_poll_stop(struct vhost_poll *poll)
 {
-       remove_wait_queue(poll->wqh, &poll->wait);
+       if (poll->wqh) {
+               remove_wait_queue(poll->wqh, &poll->wait);
+               poll->wqh = NULL;
+       }
 }
 
 static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
@@ -792,7 +804,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
                fput(filep);
 
        if (pollstart && vq->handle_kick)
-               vhost_poll_start(&vq->poll, vq->kick);
+               r = vhost_poll_start(&vq->poll, vq->kick);
 
        mutex_unlock(&vq->mutex);
 
index 2639c58b23ab497ace850895f3322df661a965df..17261e277c022abbffe8effc6a8492336ea7edef 100644 (file)
@@ -42,7 +42,7 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
 
 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
                     unsigned long mask, struct vhost_dev *dev);
-void vhost_poll_start(struct vhost_poll *poll, struct file *file);
+int vhost_poll_start(struct vhost_poll *poll, struct file *file);
 void vhost_poll_stop(struct vhost_poll *poll);
 void vhost_poll_flush(struct vhost_poll *poll);
 void vhost_poll_queue(struct vhost_poll *poll);
index 5de7a220e98680f9c9cfc7b36c1f5c2114d7144f..0de078d4cdb96f044688bfaa6b54a3bc4a59667b 100644 (file)
@@ -33,6 +33,7 @@ struct usbnet {
        wait_queue_head_t       *wait;
        struct mutex            phy_mutex;
        unsigned char           suspend_count;
+       unsigned char           pkt_cnt, pkt_err;
 
        /* i/o info: pipes etc */
        unsigned                in, out;
@@ -70,6 +71,7 @@ struct usbnet {
 #              define EVENT_DEV_OPEN   7
 #              define EVENT_DEVICE_REPORT_IDLE 8
 #              define EVENT_NO_RUNTIME_PM      9
+#              define EVENT_RX_KILL    10
 };
 
 static inline struct usb_driver *driver_of(struct usb_interface *intf)
index 498433dd067dd64eb6c728e566e61585093e960f..938b7fd1120477213888f9c7e61f98039bea3e03 100644 (file)
@@ -34,17 +34,17 @@ extern int                          udpv6_connect(struct sock *sk,
                                                      struct sockaddr *uaddr,
                                                      int addr_len);
 
-extern int                     datagram_recv_ctl(struct sock *sk,
-                                                 struct msghdr *msg,
-                                                 struct sk_buff *skb);
-
-extern int                     datagram_send_ctl(struct net *net,
-                                                 struct sock *sk,
-                                                 struct msghdr *msg,
-                                                 struct flowi6 *fl6,
-                                                 struct ipv6_txoptions *opt,
-                                                 int *hlimit, int *tclass,
-                                                 int *dontfrag);
+extern int                     ip6_datagram_recv_ctl(struct sock *sk,
+                                                     struct msghdr *msg,
+                                                     struct sk_buff *skb);
+
+extern int                     ip6_datagram_send_ctl(struct net *net,
+                                                     struct sock *sk,
+                                                     struct msghdr *msg,
+                                                     struct flowi6 *fl6,
+                                                     struct ipv6_txoptions *opt,
+                                                     int *hlimit, int *tclass,
+                                                     int *dontfrag);
 
 #define                LOOPBACK4_IPV6          cpu_to_be32(0x7f000006)
 
index 797769551b91181421a6622ddbe7de46f9d107a0..2201e699ad67bdc84e74ef4b85b8cc08ecd76276 100644 (file)
@@ -1790,10 +1790,13 @@ static ssize_t pktgen_thread_write(struct file *file,
                        return -EFAULT;
                i += len;
                mutex_lock(&pktgen_thread_lock);
-               pktgen_add_device(t, f);
+               ret = pktgen_add_device(t, f);
                mutex_unlock(&pktgen_thread_lock);
-               ret = count;
-               sprintf(pg_result, "OK: add_device=%s", f);
+               if (!ret) {
+                       ret = count;
+                       sprintf(pg_result, "OK: add_device=%s", f);
+               } else
+                       sprintf(pg_result, "ERROR: can not add device %s", f);
                goto out;
        }
 
index bddc1dd2e7f221331136aec6e4b6efebdf4feb2c..55f7ef6ada6d6f10e482bda90d09415ad1d45bd5 100644 (file)
@@ -686,7 +686,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
        new->network_header     = old->network_header;
        new->mac_header         = old->mac_header;
        new->inner_transport_header = old->inner_transport_header;
-       new->inner_network_header = old->inner_transport_header;
+       new->inner_network_header = old->inner_network_header;
        skb_dst_copy(new, old);
        new->rxhash             = old->rxhash;
        new->ooo_okay           = old->ooo_okay;
index 291f2ed7cc311649a72700d65098b6d70ba338b5..cdf2e707bb100500511d6b2d46ece69e19163cfa 100644 (file)
@@ -310,6 +310,12 @@ void tcp_slow_start(struct tcp_sock *tp)
 {
        int cnt; /* increase in packets */
        unsigned int delta = 0;
+       u32 snd_cwnd = tp->snd_cwnd;
+
+       if (unlikely(!snd_cwnd)) {
+               pr_err_once("snd_cwnd is nul, please report this bug.\n");
+               snd_cwnd = 1U;
+       }
 
        /* RFC3465: ABC Slow start
         * Increase only after a full MSS of bytes is acked
@@ -324,7 +330,7 @@ void tcp_slow_start(struct tcp_sock *tp)
        if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh)
                cnt = sysctl_tcp_max_ssthresh >> 1;     /* limited slow start */
        else
-               cnt = tp->snd_cwnd;                     /* exponential increase */
+               cnt = snd_cwnd;                         /* exponential increase */
 
        /* RFC3465: ABC
         * We MAY increase by 2 if discovered delayed ack
@@ -334,11 +340,11 @@ void tcp_slow_start(struct tcp_sock *tp)
        tp->bytes_acked = 0;
 
        tp->snd_cwnd_cnt += cnt;
-       while (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
-               tp->snd_cwnd_cnt -= tp->snd_cwnd;
+       while (tp->snd_cwnd_cnt >= snd_cwnd) {
+               tp->snd_cwnd_cnt -= snd_cwnd;
                delta++;
        }
-       tp->snd_cwnd = min(tp->snd_cwnd + delta, tp->snd_cwnd_clamp);
+       tp->snd_cwnd = min(snd_cwnd + delta, tp->snd_cwnd_clamp);
 }
 EXPORT_SYMBOL_GPL(tcp_slow_start);
 
index 492c7cfe14532aed43a1df27c433c76b2ab4cf68..e376aa9591bc6cf436ab267ab3bd3a3e105c9067 100644 (file)
@@ -3482,7 +3482,8 @@ static bool tcp_process_frto(struct sock *sk, int flag)
            ((tp->frto_counter >= 2) && (flag & FLAG_RETRANS_DATA_ACKED)))
                tp->undo_marker = 0;
 
-       if (!before(tp->snd_una, tp->frto_highmark)) {
+       if (!before(tp->snd_una, tp->frto_highmark) ||
+           !tcp_packets_in_flight(tp)) {
                tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3), flag);
                return true;
        }
@@ -5647,8 +5648,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
         * the remote receives only the retransmitted (regular) SYNs: either
         * the original SYN-data or the corresponding SYN-ACK is lost.
         */
-       syn_drop = (cookie->len <= 0 && data &&
-                   inet_csk(sk)->icsk_retransmits);
+       syn_drop = (cookie->len <= 0 && data && tp->total_retrans);
 
        tcp_fastopen_cache_set(sk, mss, cookie, syn_drop);
 
index 5a1cfc692df0b89df03b50165a28d011cf529c9b..0eaf685bddc954f89d4c1fc9f3e32fe6bdcc016f 100644 (file)
@@ -496,6 +496,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
                 * errors returned from accept().
                 */
                inet_csk_reqsk_queue_drop(sk, req, prev);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
                goto out;
 
        case TCP_SYN_SENT:
@@ -1501,8 +1502,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
         * clogging syn queue with openreqs with exponentially increasing
         * timeout.
         */
-       if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
+       if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
                goto drop;
+       }
 
        req = inet_reqsk_alloc(&tcp_request_sock_ops);
        if (!req)
@@ -1667,6 +1670,7 @@ drop_and_release:
 drop_and_free:
        reqsk_free(req);
 drop:
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
        return 0;
 }
 EXPORT_SYMBOL(tcp_v4_conn_request);
index 7f7332b446992d6f01dc873eebe976e7ac433f12..bd9f9360f769a1e3cc92568c2e94b4a03ef6c1dd 100644 (file)
@@ -1656,6 +1656,7 @@ static int addrconf_ifid_eui64(u8 *eui, struct net_device *dev)
        if (dev->addr_len != IEEE802154_ADDR_LEN)
                return -1;
        memcpy(eui, dev->dev_addr, 8);
+       eui[0] ^= 2;
        return 0;
 }
 
index 33be36398a786d0143ce8a8da0995f7966a22103..f5a54782a3408422ab4aa5da733bdbffb8755905 100644 (file)
@@ -380,7 +380,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
                if (skb->protocol == htons(ETH_P_IPV6)) {
                        sin->sin6_addr = ipv6_hdr(skb)->saddr;
                        if (np->rxopt.all)
-                               datagram_recv_ctl(sk, msg, skb);
+                               ip6_datagram_recv_ctl(sk, msg, skb);
                        if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)
                                sin->sin6_scope_id = IP6CB(skb)->iif;
                } else {
@@ -468,7 +468,8 @@ out:
 }
 
 
-int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
+int ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg,
+                         struct sk_buff *skb)
 {
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct inet6_skb_parm *opt = IP6CB(skb);
@@ -598,11 +599,12 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
        }
        return 0;
 }
+EXPORT_SYMBOL_GPL(ip6_datagram_recv_ctl);
 
-int datagram_send_ctl(struct net *net, struct sock *sk,
-                     struct msghdr *msg, struct flowi6 *fl6,
-                     struct ipv6_txoptions *opt,
-                     int *hlimit, int *tclass, int *dontfrag)
+int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
+                         struct msghdr *msg, struct flowi6 *fl6,
+                         struct ipv6_txoptions *opt,
+                         int *hlimit, int *tclass, int *dontfrag)
 {
        struct in6_pktinfo *src_info;
        struct cmsghdr *cmsg;
@@ -872,4 +874,4 @@ int datagram_send_ctl(struct net *net, struct sock *sk,
 exit_f:
        return err;
 }
-EXPORT_SYMBOL_GPL(datagram_send_ctl);
+EXPORT_SYMBOL_GPL(ip6_datagram_send_ctl);
index 22494afd981cb0095e92a3bddf36467d86ce2da9..ea42bf40a9977636f98e3ab96cc246a28aae168f 100644 (file)
@@ -390,8 +390,8 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
                msg.msg_control = (void*)(fl->opt+1);
                memset(&flowi6, 0, sizeof(flowi6));
 
-               err = datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, &junk,
-                                       &junk, &junk);
+               err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt,
+                                           &junk, &junk, &junk);
                if (err)
                        goto done;
                err = -EINVAL;
index ee94d31c9d4d494cdfe2dce1ac812e4e094fe65f..d1e2e8ef29c54abd86064728ec145f0478f360af 100644 (file)
@@ -476,8 +476,8 @@ sticky_done:
                msg.msg_controllen = optlen;
                msg.msg_control = (void*)(opt+1);
 
-               retv = datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk,
-                                        &junk);
+               retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk,
+                                            &junk, &junk);
                if (retv)
                        goto done;
 update:
@@ -1002,7 +1002,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
                release_sock(sk);
 
                if (skb) {
-                       int err = datagram_recv_ctl(sk, &msg, skb);
+                       int err = ip6_datagram_recv_ctl(sk, &msg, skb);
                        kfree_skb(skb);
                        if (err)
                                return err;
index 6cd29b1e8b926e26a7bc0df5b5c5263fc2e23d84..70fa8144999780cef19f5102247882ba6d24f6f6 100644 (file)
@@ -507,7 +507,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
        sock_recv_ts_and_drops(msg, sk, skb);
 
        if (np->rxopt.all)
-               datagram_recv_ctl(sk, msg, skb);
+               ip6_datagram_recv_ctl(sk, msg, skb);
 
        err = copied;
        if (flags & MSG_TRUNC)
@@ -822,8 +822,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
                memset(opt, 0, sizeof(struct ipv6_txoptions));
                opt->tot_len = sizeof(struct ipv6_txoptions);
 
-               err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
-                                       &hlimit, &tclass, &dontfrag);
+               err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
+                                           &hlimit, &tclass, &dontfrag);
                if (err < 0) {
                        fl6_sock_release(flowlabel);
                        return err;
index 06087e58738a553ce2ff050cb6bbbcac5d393014..bbb28ae7e5f315f9e356e4a7517d63389fef7ce5 100644 (file)
@@ -423,6 +423,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                }
 
                inet_csk_reqsk_queue_drop(sk, req, prev);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
                goto out;
 
        case TCP_SYN_SENT:
@@ -959,8 +960,10 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
                        goto drop;
        }
 
-       if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
+       if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
                goto drop;
+       }
 
        req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
        if (req == NULL)
@@ -1109,6 +1112,7 @@ drop_and_release:
 drop_and_free:
        reqsk_free(req);
 drop:
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
        return 0; /* don't send reset */
 }
 
index cb5bf497c09c4e1ebffa8d2c2ad84c9fca678a15..599e1ba6d1ceaa6766d53eda020a69a0aa234e5b 100644 (file)
@@ -467,7 +467,7 @@ try_again:
                        ip_cmsg_recv(msg, skb);
        } else {
                if (np->rxopt.all)
-                       datagram_recv_ctl(sk, msg, skb);
+                       ip6_datagram_recv_ctl(sk, msg, skb);
        }
 
        err = copied;
@@ -1143,8 +1143,8 @@ do_udp_sendmsg:
                memset(opt, 0, sizeof(struct ipv6_txoptions));
                opt->tot_len = sizeof(*opt);
 
-               err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
-                                       &hlimit, &tclass, &dontfrag);
+               err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
+                                           &hlimit, &tclass, &dontfrag);
                if (err < 0) {
                        fl6_sock_release(flowlabel);
                        return err;
index 1a9f3723c13cb45b608bbc07fe4a9803df926523..06389d5ff120bdee9ab09fe33de86d9a2854c13c 100644 (file)
@@ -168,6 +168,51 @@ l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
 
 }
 
+/* Lookup the tunnel socket, possibly involving the fs code if the socket is
+ * owned by userspace.  A struct sock returned from this function must be
+ * released using l2tp_tunnel_sock_put once you're done with it.
+ */
+struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel)
+{
+       int err = 0;
+       struct socket *sock = NULL;
+       struct sock *sk = NULL;
+
+       if (!tunnel)
+               goto out;
+
+       if (tunnel->fd >= 0) {
+               /* Socket is owned by userspace, who might be in the process
+                * of closing it.  Look the socket up using the fd to ensure
+                * consistency.
+                */
+               sock = sockfd_lookup(tunnel->fd, &err);
+               if (sock)
+                       sk = sock->sk;
+       } else {
+               /* Socket is owned by kernelspace */
+               sk = tunnel->sock;
+       }
+
+out:
+       return sk;
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_lookup);
+
+/* Drop a reference to a tunnel socket obtained via. l2tp_tunnel_sock_put */
+void l2tp_tunnel_sock_put(struct sock *sk)
+{
+       struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
+       if (tunnel) {
+               if (tunnel->fd >= 0) {
+                       /* Socket is owned by userspace */
+                       sockfd_put(sk->sk_socket);
+               }
+               sock_put(sk);
+       }
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put);
+
 /* Lookup a session by id in the global session list
  */
 static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id)
@@ -1607,6 +1652,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
        tunnel->old_sk_destruct = sk->sk_destruct;
        sk->sk_destruct = &l2tp_tunnel_destruct;
        tunnel->sock = sk;
+       tunnel->fd = fd;
        lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock");
 
        sk->sk_allocation = GFP_ATOMIC;
@@ -1642,24 +1688,32 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
  */
 int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
 {
-       int err = 0;
-       struct socket *sock = tunnel->sock ? tunnel->sock->sk_socket : NULL;
+       int err = -EBADF;
+       struct socket *sock = NULL;
+       struct sock *sk = NULL;
+
+       sk = l2tp_tunnel_sock_lookup(tunnel);
+       if (!sk)
+               goto out;
+
+       sock = sk->sk_socket;
+       BUG_ON(!sock);
 
        /* Force the tunnel socket to close. This will eventually
         * cause the tunnel to be deleted via the normal socket close
         * mechanisms when userspace closes the tunnel socket.
         */
-       if (sock != NULL) {
-               err = inet_shutdown(sock, 2);
+       err = inet_shutdown(sock, 2);
 
-               /* If the tunnel's socket was created by the kernel,
-                * close the socket here since the socket was not
-                * created by userspace.
-                */
-               if (sock->file == NULL)
-                       err = inet_release(sock);
-       }
+       /* If the tunnel's socket was created by the kernel,
+        * close the socket here since the socket was not
+        * created by userspace.
+        */
+       if (sock->file == NULL)
+               err = inet_release(sock);
 
+       l2tp_tunnel_sock_put(sk);
+out:
        return err;
 }
 EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
index 56d583e083a7baf7b0cf707a76882a88701d8025..e62204cad4fe00c91e88f791c7c7896c130249fa 100644 (file)
@@ -188,7 +188,8 @@ struct l2tp_tunnel {
        int (*recv_payload_hook)(struct sk_buff *skb);
        void (*old_sk_destruct)(struct sock *);
        struct sock             *sock;          /* Parent socket */
-       int                     fd;
+       int                     fd;             /* Parent fd, if tunnel socket
+                                                * was created by userspace */
 
        uint8_t                 priv[0];        /* private data */
 };
@@ -228,6 +229,8 @@ out:
        return tunnel;
 }
 
+extern struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel);
+extern void l2tp_tunnel_sock_put(struct sock *sk);
 extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id);
 extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
 extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
index 927547171bc7119f57a7b8afecf453ec5eeabf61..8ee4a86ae996ca624e06a1422e4e07a2e957c584 100644 (file)
@@ -554,8 +554,8 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
                memset(opt, 0, sizeof(struct ipv6_txoptions));
                opt->tot_len = sizeof(struct ipv6_txoptions);
 
-               err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
-                                       &hlimit, &tclass, &dontfrag);
+               err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
+                                           &hlimit, &tclass, &dontfrag);
                if (err < 0) {
                        fl6_sock_release(flowlabel);
                        return err;
@@ -646,7 +646,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
                            struct msghdr *msg, size_t len, int noblock,
                            int flags, int *addr_len)
 {
-       struct inet_sock *inet = inet_sk(sk);
+       struct ipv6_pinfo *np = inet6_sk(sk);
        struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)msg->msg_name;
        size_t copied = 0;
        int err = -EOPNOTSUPP;
@@ -688,8 +688,8 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
                        lsa->l2tp_scope_id = IP6CB(skb)->iif;
        }
 
-       if (inet->cmsg_flags)
-               ip_cmsg_recv(msg, skb);
+       if (np->rxopt.all)
+               ip6_datagram_recv_ctl(sk, msg, skb);
 
        if (flags & MSG_TRUNC)
                copied = skb->len;
index e639645e8fec8ca365c8fc1bc4fb3650db04b5f4..c111bd0e083a5576d86dfc8f9ad91ab320c46efa 100644 (file)
@@ -2361,13 +2361,15 @@ static int packet_release(struct socket *sock)
 
        packet_flush_mclist(sk);
 
-       memset(&req_u, 0, sizeof(req_u));
-
-       if (po->rx_ring.pg_vec)
+       if (po->rx_ring.pg_vec) {
+               memset(&req_u, 0, sizeof(req_u));
                packet_set_ring(sk, &req_u, 1, 0);
+       }
 
-       if (po->tx_ring.pg_vec)
+       if (po->tx_ring.pg_vec) {
+               memset(&req_u, 0, sizeof(req_u));
                packet_set_ring(sk, &req_u, 1, 1);
+       }
 
        fanout_release(sk);
 
index 298c0ddfb57e3e5625a69d412c548d198005fd3b..3d2acc7a9c8099e677412276eeafe5a8b3f90fcb 100644 (file)
@@ -438,18 +438,18 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                if (q->rate) {
                        struct sk_buff_head *list = &sch->q;
 
-                       delay += packet_len_2_sched_time(skb->len, q);
-
                        if (!skb_queue_empty(list)) {
                                /*
-                                * Last packet in queue is reference point (now).
-                                * First packet in queue is already in flight,
-                                * calculate this time bonus and substract
+                                * Last packet in queue is reference point (now),
+                                * calculate this time bonus and subtract
                                 * from delay.
                                 */
-                               delay -= now - netem_skb_cb(skb_peek(list))->time_to_send;
+                               delay -= netem_skb_cb(skb_peek_tail(list))->time_to_send - now;
+                               delay = max_t(psched_tdiff_t, 0, delay);
                                now = netem_skb_cb(skb_peek_tail(list))->time_to_send;
                        }
+
+                       delay += packet_len_2_sched_time(skb->len, q);
                }
 
                cb->time_to_send = now + delay;
index 0a148c9d2a5ce7b3915df8c10f5e5f0913b99c25..0f679df7d072794094ece1aa48988e834395d849 100644 (file)
@@ -465,7 +465,7 @@ static int svc_udp_get_dest_address4(struct svc_rqst *rqstp,
 }
 
 /*
- * See net/ipv6/datagram.c : datagram_recv_ctl
+ * See net/ipv6/datagram.c : ip6_datagram_recv_ctl
  */
 static int svc_udp_get_dest_address6(struct svc_rqst *rqstp,
                                     struct cmsghdr *cmh)
index 01592d7d4789e389fd565e7cf9fb194dc7ee7bb0..45f1618c8e239c2db21692cfa5358f460918b76d 100644 (file)
@@ -1358,7 +1358,7 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
                                                  &iwe, IW_EV_UINT_LEN);
        }
 
-       buf = kmalloc(30, GFP_ATOMIC);
+       buf = kmalloc(31, GFP_ATOMIC);
        if (buf) {
                memset(&iwe, 0, sizeof(iwe));
                iwe.cmd = IWEVCUSTOM;