Merge tag 'v4.4.51' into linux-linaro-lsk-v4.4
author Alex Shi <alex.shi@linaro.org>
Fri, 24 Feb 2017 04:03:18 +0000 (12:03 +0800)
committer Alex Shi <alex.shi@linaro.org>
Fri, 24 Feb 2017 04:03:18 +0000 (12:03 +0800)
 This is the 4.4.51 stable release

47 files changed:
Makefile
arch/arm/lib/getuser.S
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/radeon/radeon_cursor.c
drivers/input/mouse/elan_i2c_core.c
drivers/md/bcache/bcache.h
drivers/md/bcache/btree.c
drivers/md/bcache/btree.h
drivers/md/bcache/request.c
drivers/md/bcache/super.c
drivers/media/usb/siano/smsusb.c
drivers/mmc/core/mmc.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/loopback.c
drivers/net/macvtap.c
drivers/net/tun.c
drivers/ntb/ntb_transport.c
drivers/scsi/scsi_lib.c
drivers/scsi/sg.c
fs/fuse/dev.c
fs/splice.c
include/linux/can/core.h
include/linux/netdevice.h
include/net/cipso_ipv4.h
kernel/futex.c
kernel/printk/printk.c
net/can/af_can.c
net/can/af_can.h
net/can/bcm.c
net/can/gw.c
net/can/raw.c
net/core/dev.c
net/ethernet/eth.c
net/ipv4/cipso_ipv4.c
net/ipv4/ip_sockglue.c
net/ipv4/ping.c
net/ipv4/tcp.c
net/ipv4/tcp_output.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_tunnel.c
net/ipv6/sit.c
net/ipv6/tcp_ipv6.c
net/l2tp/l2tp_core.h
net/l2tp/l2tp_ip.c
net/l2tp/l2tp_ip6.c
net/packet/af_packet.c
net/sctp/socket.c

index 5fab6d4068b5c5f6592f9adc2e35cbfdc28afca7..117357188f01b03dcd3eb79cc5ac75da19280499 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 49
+SUBLEVEL = 51
 EXTRAVERSION =
 NAME = Blurry Fish Butt
 
index 8ecfd15c3a0248db29667fe3dc6ec6429fc9fc7c..df73914e81c8344feccac5df8d5791dcbe92ed60 100644 (file)
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -67,7 +67,7 @@ ENTRY(__get_user_4)
 ENDPROC(__get_user_4)
 
 ENTRY(__get_user_8)
-       check_uaccess r0, 8, r1, r2, __get_user_bad
+       check_uaccess r0, 8, r1, r2, __get_user_bad8
 #ifdef CONFIG_THUMB2_KERNEL
 5: TUSER(ldr)  r2, [r0]
 6: TUSER(ldr)  r3, [r0, #4]
index 7cb2815e815e156aa9db7c89f164af42e96aef35..a3b96d691ac9a5ccbdf7c051be096205ef6e6b98 100644 (file)
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1812,7 +1812,7 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
                                mgr->payloads[i].num_slots = req_payload.num_slots;
                        } else if (mgr->payloads[i].num_slots) {
                                mgr->payloads[i].num_slots = 0;
-                               drm_dp_destroy_payload_step1(mgr, port, port->vcpi.vcpi, &mgr->payloads[i]);
+                               drm_dp_destroy_payload_step1(mgr, port, mgr->payloads[i].vcpi, &mgr->payloads[i]);
                                req_payload.payload_state = mgr->payloads[i].payload_state;
                                mgr->payloads[i].start_slot = 0;
                        }
index 04cec0da5d1e9d0385099691ec24e4f835d1b601..8901228b5d5d011eb06aebad573675791f8c6237 100644 (file)
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -205,8 +205,8 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
        }
 
        if (x <= (crtc->x - w) || y <= (crtc->y - radeon_crtc->cursor_height) ||
-           x >= (crtc->x + crtc->mode.crtc_hdisplay) ||
-           y >= (crtc->y + crtc->mode.crtc_vdisplay))
+           x >= (crtc->x + crtc->mode.hdisplay) ||
+           y >= (crtc->y + crtc->mode.vdisplay))
                goto out_of_bounds;
 
        x += xorigin;
index d15b338130213c53489baa600d6dcea1500790d4..ed1935f300a740a6b383d5a99baa45c811bb8872 100644 (file)
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1232,6 +1232,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
        { "ELAN0000", 0 },
        { "ELAN0100", 0 },
        { "ELAN0600", 0 },
+       { "ELAN0605", 0 },
        { "ELAN1000", 0 },
        { }
 };
index 6b420a55c7459f1af42b9dd9c920dce63bd7d089..c3ea03c9a1a8ef603a25934ccbe32ea4bfca3d66 100644 (file)
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -425,7 +425,7 @@ struct cache {
         * until a gc finishes - otherwise we could pointlessly burn a ton of
         * cpu
         */
-       unsigned                invalidate_needs_gc:1;
+       unsigned                invalidate_needs_gc;
 
        bool                    discard; /* Get rid of? */
 
@@ -593,8 +593,8 @@ struct cache_set {
 
        /* Counts how many sectors bio_insert has added to the cache */
        atomic_t                sectors_to_gc;
+       wait_queue_head_t       gc_wait;
 
-       wait_queue_head_t       moving_gc_wait;
        struct keybuf           moving_gc_keys;
        /* Number of moving GC bios in flight */
        struct semaphore        moving_in_flight;
index 22b9e34ceb75c5318a25870bf346a27ba2e64052..5b815e64c1c907d402769b6e26689c8576343c0e 100644 (file)
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1762,33 +1762,34 @@ static void bch_btree_gc(struct cache_set *c)
        bch_moving_gc(c);
 }
 
-static int bch_gc_thread(void *arg)
+static bool gc_should_run(struct cache_set *c)
 {
-       struct cache_set *c = arg;
        struct cache *ca;
        unsigned i;
 
-       while (1) {
-again:
-               bch_btree_gc(c);
+       for_each_cache(ca, c, i)
+               if (ca->invalidate_needs_gc)
+                       return true;
 
-               set_current_state(TASK_INTERRUPTIBLE);
-               if (kthread_should_stop())
-                       break;
+       if (atomic_read(&c->sectors_to_gc) < 0)
+               return true;
 
-               mutex_lock(&c->bucket_lock);
+       return false;
+}
 
-               for_each_cache(ca, c, i)
-                       if (ca->invalidate_needs_gc) {
-                               mutex_unlock(&c->bucket_lock);
-                               set_current_state(TASK_RUNNING);
-                               goto again;
-                       }
+static int bch_gc_thread(void *arg)
+{
+       struct cache_set *c = arg;
 
-               mutex_unlock(&c->bucket_lock);
+       while (1) {
+               wait_event_interruptible(c->gc_wait,
+                          kthread_should_stop() || gc_should_run(c));
 
-               try_to_freeze();
-               schedule();
+               if (kthread_should_stop())
+                       break;
+
+               set_gc_sectors(c);
+               bch_btree_gc(c);
        }
 
        return 0;
@@ -1796,11 +1797,10 @@ again:
 
 int bch_gc_thread_start(struct cache_set *c)
 {
-       c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc");
+       c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
        if (IS_ERR(c->gc_thread))
                return PTR_ERR(c->gc_thread);
 
-       set_task_state(c->gc_thread, TASK_INTERRUPTIBLE);
        return 0;
 }
 
index 5c391fa01bedbfba3f1dea062605460ccadc1c6a..9b80417cd547f52c264c1b4b993f3ee2155405f2 100644 (file)
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -260,8 +260,7 @@ void bch_initial_mark_key(struct cache_set *, int, struct bkey *);
 
 static inline void wake_up_gc(struct cache_set *c)
 {
-       if (c->gc_thread)
-               wake_up_process(c->gc_thread);
+       wake_up(&c->gc_wait);
 }
 
 #define MAP_DONE       0
index 25fa8445bb2422ba29f745aa2c0b68e37b3ec326..2410df1c2a054ca13a84aa0b84964f182395b9f0 100644 (file)
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -196,10 +196,8 @@ static void bch_data_insert_start(struct closure *cl)
        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
        struct bio *bio = op->bio, *n;
 
-       if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
-               set_gc_sectors(op->c);
+       if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
                wake_up_gc(op->c);
-       }
 
        if (op->bypass)
                return bch_data_invalidate(cl);
index 3d5c0ba131818c8b7238c7d57f1c1f8eb252c064..7b5880b8874cf3f570dad2286b07dc7da543f7da 100644 (file)
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1489,6 +1489,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
        mutex_init(&c->bucket_lock);
        init_waitqueue_head(&c->btree_cache_wait);
        init_waitqueue_head(&c->bucket_wait);
+       init_waitqueue_head(&c->gc_wait);
        sema_init(&c->uuid_write_mutex, 1);
 
        spin_lock_init(&c->btree_gc_time.lock);
@@ -1547,6 +1548,7 @@ static void run_cache_set(struct cache_set *c)
 
        for_each_cache(ca, c, i)
                c->nbuckets += ca->sb.nbuckets;
+       set_gc_sectors(c);
 
        if (CACHE_SYNC(&c->sb)) {
                LIST_HEAD(journal);
index c945e4c2fbd49cdb0ffab93a53cd72ee37e81db7..ec30a004f31913c95d628a4615e912b456467328 100644 (file)
--- a/drivers/media/usb/siano/smsusb.c
+++ b/drivers/media/usb/siano/smsusb.c
@@ -200,22 +200,30 @@ static int smsusb_start_streaming(struct smsusb_device_t *dev)
 static int smsusb_sendrequest(void *context, void *buffer, size_t size)
 {
        struct smsusb_device_t *dev = (struct smsusb_device_t *) context;
-       struct sms_msg_hdr *phdr = (struct sms_msg_hdr *) buffer;
-       int dummy;
+       struct sms_msg_hdr *phdr;
+       int dummy, ret;
 
        if (dev->state != SMSUSB_ACTIVE) {
                pr_debug("Device not active yet\n");
                return -ENOENT;
        }
 
+       phdr = kmalloc(size, GFP_KERNEL);
+       if (!phdr)
+               return -ENOMEM;
+       memcpy(phdr, buffer, size);
+
        pr_debug("sending %s(%d) size: %d\n",
                  smscore_translate_msg(phdr->msg_type), phdr->msg_type,
                  phdr->msg_length);
 
        smsendian_handle_tx_message((struct sms_msg_data *) phdr);
-       smsendian_handle_message_header((struct sms_msg_hdr *)buffer);
-       return usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2),
-                           buffer, size, &dummy, 1000);
+       smsendian_handle_message_header((struct sms_msg_hdr *)phdr);
+       ret = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2),
+                           phdr, size, &dummy, 1000);
+
+       kfree(phdr);
+       return ret;
 }
 
 static char *smsusb1_fw_lkup[] = {
index 78187699467a11cb6ad43b9fb2298aaeee933a13..79a0c26e14190077d707f9e0141ca230e6d13d2d 100644 (file)
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1581,10 +1581,10 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
                err = mmc_select_hs400(card);
                if (err)
                        goto free_card;
-       } else if (mmc_card_hs(card)) {
+       } else {
                /* Select the desired bus width optionally */
                err = mmc_select_bus_width(card);
-               if (!IS_ERR_VALUE(err)) {
+               if (!IS_ERR_VALUE(err) && mmc_card_hs(card)) {
                        err = mmc_select_hs_ddr(card);
                        if (err)
                                goto free_card;
index bbff8ec6713e3359388e19470c1a54b2f121ec67..28a4b34310b2b750e5789d7f92b778bca45d0dc5 100644 (file)
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -502,8 +502,11 @@ void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
                return;
 
        for (ring = 0; ring < priv->rx_ring_num; ring++) {
-               if (mlx4_en_is_ring_empty(priv->rx_ring[ring]))
+               if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) {
+                       local_bh_disable();
                        napi_reschedule(&priv->rx_cq[ring]->napi);
+                       local_bh_enable();
+               }
        }
 }
 
index dc7d970bd1c0baaf1f707df87001da8f59f3d74a..effcdbfb06e96c37a1123a438a02bfd3ccf44b7f 100644 (file)
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -164,6 +164,7 @@ static void loopback_setup(struct net_device *dev)
 {
        dev->mtu                = 64 * 1024;
        dev->hard_header_len    = ETH_HLEN;     /* 14   */
+       dev->min_header_len     = ETH_HLEN;     /* 14   */
        dev->addr_len           = ETH_ALEN;     /* 6    */
        dev->type               = ARPHRD_LOOPBACK;      /* 0x0001*/
        dev->flags              = IFF_LOOPBACK;
index 159a68782bec9ac172c5fa168fe7dc05605f7ad4..79de9608ac485b5627f528aa5e235f2a4d97dae2 100644 (file)
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -725,7 +725,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
        ssize_t n;
 
        if (q->flags & IFF_VNET_HDR) {
-               vnet_hdr_len = q->vnet_hdr_sz;
+               vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
 
                err = -EINVAL;
                if (len < vnet_hdr_len)
@@ -865,7 +865,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
 
        if (q->flags & IFF_VNET_HDR) {
                struct virtio_net_hdr vnet_hdr;
-               vnet_hdr_len = q->vnet_hdr_sz;
+               vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
                if (iov_iter_count(iter) < vnet_hdr_len)
                        return -EINVAL;
 
index 111b972e3053eab7ce299721165d8888494a7059..c31d8e74f131ed5da2e7cad82e655096d7126996 100644 (file)
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1108,9 +1108,11 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
        }
 
        if (tun->flags & IFF_VNET_HDR) {
-               if (len < tun->vnet_hdr_sz)
+               int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
+
+               if (len < vnet_hdr_sz)
                        return -EINVAL;
-               len -= tun->vnet_hdr_sz;
+               len -= vnet_hdr_sz;
 
                n = copy_from_iter(&gso, sizeof(gso), from);
                if (n != sizeof(gso))
@@ -1122,7 +1124,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 
                if (tun16_to_cpu(tun, gso.hdr_len) > len)
                        return -EINVAL;
-               iov_iter_advance(from, tun->vnet_hdr_sz - sizeof(gso));
+               iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
        }
 
        if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
@@ -1301,7 +1303,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
                vlan_hlen = VLAN_HLEN;
 
        if (tun->flags & IFF_VNET_HDR)
-               vnet_hdr_sz = tun->vnet_hdr_sz;
+               vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
 
        total = skb->len + vlan_hlen + vnet_hdr_sz;
 
index 60654d524858c4bf52f6c5b5668c90719f523eda..ecc6fb9ca92f7ab08f79a803dbd91c1a3cdcb236 100644 (file)
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -1623,7 +1623,7 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
 
        node = dev_to_node(&ndev->dev);
 
-       free_queue = ffs(nt->qp_bitmap);
+       free_queue = ffs(nt->qp_bitmap_free);
        if (!free_queue)
                goto err;
 
@@ -2082,9 +2082,8 @@ module_init(ntb_transport_init);
 
 static void __exit ntb_transport_exit(void)
 {
-       debugfs_remove_recursive(nt_debugfs_dir);
-
        ntb_unregister_client(&ntb_transport_client);
        bus_unregister(&ntb_transport_bus);
+       debugfs_remove_recursive(nt_debugfs_dir);
 }
 module_exit(ntb_transport_exit);
index cf5b99e1f12b087cbed6831530175bc2c42d2928..8558e388696045fcc20d5743901b5143d3247037 100644 (file)
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1120,7 +1120,8 @@ int scsi_init_io(struct scsi_cmnd *cmd)
        bool is_mq = (rq->mq_ctx != NULL);
        int error;
 
-       BUG_ON(!rq->nr_phys_segments);
+       if (WARN_ON_ONCE(!rq->nr_phys_segments))
+               return -EINVAL;
 
        error = scsi_init_sgtable(rq, &cmd->sdb);
        if (error)
index a1c29b0afb22e10cb4ae741a2080ad8cbcae1cbd..dedcff9cabb5c92835d2c7e72a477233bcbbc9a2 100644 (file)
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1763,6 +1763,10 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
                        return res;
 
                iov_iter_truncate(&i, hp->dxfer_len);
+               if (!iov_iter_count(&i)) {
+                       kfree(iov);
+                       return -EINVAL;
+               }
 
                res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC);
                kfree(iov);
index 9096d44eb22155493176c89ede97234ba50e1c06..d0cf1f010fbe7604a781275c01a4f4e50417a13a 100644 (file)
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -418,6 +418,10 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
 static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
 {
        spin_lock(&fiq->waitq.lock);
+       if (test_bit(FR_FINISHED, &req->flags)) {
+               spin_unlock(&fiq->waitq.lock);
+               return;
+       }
        if (list_empty(&req->intr_entry)) {
                list_add_tail(&req->intr_entry, &fiq->interrupts);
                wake_up_locked(&fiq->waitq);
index 0f77e9682857a5b513b67b790af078f0b6fc9b28..8398974e15380dbeb8a93e56f81c69614e798323 100644 (file)
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -211,6 +211,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
                        buf->len = spd->partial[page_nr].len;
                        buf->private = spd->partial[page_nr].private;
                        buf->ops = spd->ops;
+                       buf->flags = 0;
                        if (spd->flags & SPLICE_F_GIFT)
                                buf->flags |= PIPE_BUF_FLAG_GIFT;
 
index a0875001b13c84ad70a9b2909654e9ffb6824c58..df08a41d5be5f26cfa4cdc74935f5eae7fa51385 100644 (file)
--- a/include/linux/can/core.h
+++ b/include/linux/can/core.h
@@ -45,10 +45,9 @@ struct can_proto {
 extern int  can_proto_register(const struct can_proto *cp);
 extern void can_proto_unregister(const struct can_proto *cp);
 
-extern int  can_rx_register(struct net_device *dev, canid_t can_id,
-                           canid_t mask,
-                           void (*func)(struct sk_buff *, void *),
-                           void *data, char *ident);
+int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
+                   void (*func)(struct sk_buff *, void *),
+                   void *data, char *ident, struct sock *sk);
 
 extern void can_rx_unregister(struct net_device *dev, canid_t can_id,
                              canid_t mask,
index 93a6a2c66d15dc66351e752d1d8cb56ccec70aec..4035bbe4097112505ee6b7f74cca3c958f2c10d5 100644 (file)
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1399,6 +1399,7 @@ enum netdev_priv_flags {
  *     @mtu:           Interface MTU value
  *     @type:          Interface hardware type
  *     @hard_header_len: Maximum hardware header length.
+ *     @min_header_len:  Minimum hardware header length
  *
  *     @needed_headroom: Extra headroom the hardware may need, but not in all
  *                       cases can this be guaranteed
@@ -1619,6 +1620,7 @@ struct net_device {
        unsigned int            mtu;
        unsigned short          type;
        unsigned short          hard_header_len;
+       unsigned short          min_header_len;
 
        unsigned short          needed_headroom;
        unsigned short          needed_tailroom;
@@ -2541,6 +2543,8 @@ static inline bool dev_validate_header(const struct net_device *dev,
 {
        if (likely(len >= dev->hard_header_len))
                return true;
+       if (len < dev->min_header_len)
+               return false;
 
        if (capable(CAP_SYS_RAWIO)) {
                memset(ll_header + len, 0, dev->hard_header_len - len);
index 3ebb168b9afc68ad639b5d32f6182a845c83d759..a34b141f125f0032662f147b598c9fef4fb4bcef 100644 (file)
--- a/include/net/cipso_ipv4.h
+++ b/include/net/cipso_ipv4.h
@@ -309,6 +309,10 @@ static inline int cipso_v4_validate(const struct sk_buff *skb,
        }
 
        for (opt_iter = 6; opt_iter < opt_len;) {
+               if (opt_iter + 1 == opt_len) {
+                       err_offset = opt_iter;
+                       goto out;
+               }
                tag_len = opt[opt_iter + 1];
                if ((tag_len == 0) || (tag_len > (opt_len - opt_iter))) {
                        err_offset = opt_iter + 1;
index e8af73cc51a7cd6bf453aaf03e5ccd5842aacca2..beb042dcc3329dc67281cb119bd61ba95fbee82f 100644 (file)
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -3199,4 +3199,4 @@ static int __init futex_init(void)
 
        return 0;
 }
-__initcall(futex_init);
+core_initcall(futex_init);
index c048e34b177f2a5e63557f672e9827330c1b34cf..0b5613554769a8364e68166a2ee738e1cfd5aa11 100644 (file)
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -1436,7 +1436,7 @@ static void call_console_drivers(int level,
 {
        struct console *con;
 
-       trace_console(text, len);
+       trace_console_rcuidle(text, len);
 
        if (level >= console_loglevel && !ignore_loglevel)
                return;
index 166d436196c14480184859e2bcaadeb7c5514f3a..928f580640980c9fbcbf2af4786674cb2b9be21d 100644 (file)
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -445,6 +445,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
  * @func: callback function on filter match
  * @data: returned parameter for callback function
  * @ident: string for calling module identification
+ * @sk: socket pointer (might be NULL)
  *
  * Description:
  *  Invokes the callback function with the received sk_buff and the given
@@ -468,7 +469,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
  */
 int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
                    void (*func)(struct sk_buff *, void *), void *data,
-                   char *ident)
+                   char *ident, struct sock *sk)
 {
        struct receiver *r;
        struct hlist_head *rl;
@@ -496,6 +497,7 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
                r->func    = func;
                r->data    = data;
                r->ident   = ident;
+               r->sk      = sk;
 
                hlist_add_head_rcu(&r->list, rl);
                d->entries++;
@@ -520,8 +522,11 @@ EXPORT_SYMBOL(can_rx_register);
 static void can_rx_delete_receiver(struct rcu_head *rp)
 {
        struct receiver *r = container_of(rp, struct receiver, rcu);
+       struct sock *sk = r->sk;
 
        kmem_cache_free(rcv_cache, r);
+       if (sk)
+               sock_put(sk);
 }
 
 /**
@@ -596,8 +601,11 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
        spin_unlock(&can_rcvlists_lock);
 
        /* schedule the receiver item for deletion */
-       if (r)
+       if (r) {
+               if (r->sk)
+                       sock_hold(r->sk);
                call_rcu(&r->rcu, can_rx_delete_receiver);
+       }
 }
 EXPORT_SYMBOL(can_rx_unregister);
 
index fca0fe9fc45a497cdf3da82d5414e846e7cc61b7..b86f5129e8385fe84ef671bb914e8e05c2977ca0 100644 (file)
--- a/net/can/af_can.h
+++ b/net/can/af_can.h
 
 struct receiver {
        struct hlist_node list;
-       struct rcu_head rcu;
        canid_t can_id;
        canid_t mask;
        unsigned long matches;
        void (*func)(struct sk_buff *, void *);
        void *data;
        char *ident;
+       struct sock *sk;
+       struct rcu_head rcu;
 };
 
 #define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)
index 24d66c1cc0cd3dec0fcc9546a07e1f56ca801387..4ccfd356baedef930a15551ccc0c503a5e1e372b 100644 (file)
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -1179,7 +1179,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
                                err = can_rx_register(dev, op->can_id,
                                                      REGMASK(op->can_id),
                                                      bcm_rx_handler, op,
-                                                     "bcm");
+                                                     "bcm", sk);
 
                                op->rx_reg_dev = dev;
                                dev_put(dev);
@@ -1188,7 +1188,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
                } else
                        err = can_rx_register(NULL, op->can_id,
                                              REGMASK(op->can_id),
-                                             bcm_rx_handler, op, "bcm");
+                                             bcm_rx_handler, op, "bcm", sk);
                if (err) {
                        /* this bcm rx op is broken -> remove it */
                        list_del(&op->list);
index 455168718c2efb4fa6b9e396e945a3824437c65d..77c8af4047ef6f2164724ddc06328dbcbf2f422e 100644 (file)
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -442,7 +442,7 @@ static inline int cgw_register_filter(struct cgw_job *gwj)
 {
        return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id,
                               gwj->ccgw.filter.can_mask, can_can_gw_rcv,
-                              gwj, "gw");
+                              gwj, "gw", NULL);
 }
 
 static inline void cgw_unregister_filter(struct cgw_job *gwj)
index 56af689ca9992d2566607d0251aa4f97a57ed51d..e9403a26a1d54dcdbe050326bdfe4ace440fbc4b 100644 (file)
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -190,7 +190,7 @@ static int raw_enable_filters(struct net_device *dev, struct sock *sk,
        for (i = 0; i < count; i++) {
                err = can_rx_register(dev, filter[i].can_id,
                                      filter[i].can_mask,
-                                     raw_rcv, sk, "raw");
+                                     raw_rcv, sk, "raw", sk);
                if (err) {
                        /* clean up successfully registered filters */
                        while (--i >= 0)
@@ -211,7 +211,7 @@ static int raw_enable_errfilter(struct net_device *dev, struct sock *sk,
 
        if (err_mask)
                err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG,
-                                     raw_rcv, sk, "raw");
+                                     raw_rcv, sk, "raw", sk);
 
        return err;
 }
index 0798a0f1b39524af6cd9be4077ecba83e8c46d03..08215a85c742a9999bf4cb2cd2b6b11e4a6fade0 100644 (file)
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1676,24 +1676,19 @@ EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
 
 static struct static_key netstamp_needed __read_mostly;
 #ifdef HAVE_JUMP_LABEL
-/* We are not allowed to call static_key_slow_dec() from irq context
- * If net_disable_timestamp() is called from irq context, defer the
- * static_key_slow_dec() calls.
- */
 static atomic_t netstamp_needed_deferred;
-#endif
-
-void net_enable_timestamp(void)
+static void netstamp_clear(struct work_struct *work)
 {
-#ifdef HAVE_JUMP_LABEL
        int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
 
-       if (deferred) {
-               while (--deferred)
-                       static_key_slow_dec(&netstamp_needed);
-               return;
-       }
+       while (deferred--)
+               static_key_slow_dec(&netstamp_needed);
+}
+static DECLARE_WORK(netstamp_work, netstamp_clear);
 #endif
+
+void net_enable_timestamp(void)
+{
        static_key_slow_inc(&netstamp_needed);
 }
 EXPORT_SYMBOL(net_enable_timestamp);
@@ -1701,12 +1696,12 @@ EXPORT_SYMBOL(net_enable_timestamp);
 void net_disable_timestamp(void)
 {
 #ifdef HAVE_JUMP_LABEL
-       if (in_interrupt()) {
-               atomic_inc(&netstamp_needed_deferred);
-               return;
-       }
-#endif
+       /* net_disable_timestamp() can be called from non process context */
+       atomic_inc(&netstamp_needed_deferred);
+       schedule_work(&netstamp_work);
+#else
        static_key_slow_dec(&netstamp_needed);
+#endif
 }
 EXPORT_SYMBOL(net_disable_timestamp);
 
index de85d4e1cf43c10016e159248a94e7fd544e4c04..52dcd414c2afbc70262e46fd1de25d166d297e60 100644 (file)
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -353,6 +353,7 @@ void ether_setup(struct net_device *dev)
        dev->header_ops         = &eth_header_ops;
        dev->type               = ARPHRD_ETHER;
        dev->hard_header_len    = ETH_HLEN;
+       dev->min_header_len     = ETH_HLEN;
        dev->mtu                = ETH_DATA_LEN;
        dev->addr_len           = ETH_ALEN;
        dev->tx_queue_len       = 1000; /* Ethernet wants good queues */
index bdb2a07ec363b709197435ac602b74377a600780..6cc3e1d602fb984a217c3bbad455222ecf2201c4 100644 (file)
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -1657,6 +1657,10 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
                                goto validate_return_locked;
                        }
 
+               if (opt_iter + 1 == opt_len) {
+                       err_offset = opt_iter;
+                       goto validate_return_locked;
+               }
                tag_len = tag[1];
                if (tag_len > (opt_len - opt_iter)) {
                        err_offset = opt_iter + 1;
index 9ce202549e7a1d7de0dbc02f45aef0ed901c4e90..bc14c5bb124bd5d6d34a03bfb359c4192e4e7612 100644 (file)
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -1192,7 +1192,14 @@ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
                pktinfo->ipi_ifindex = 0;
                pktinfo->ipi_spec_dst.s_addr = 0;
        }
-       skb_dst_drop(skb);
+       /* We need to keep the dst for __ip_options_echo()
+        * We could restrict the test to opt.ts_needtime || opt.srr,
+        * but the following is good enough as IP options are not often used.
+        */
+       if (unlikely(IPCB(skb)->opt.optlen))
+               skb_dst_force(skb);
+       else
+               skb_dst_drop(skb);
 }
 
 int ip_setsockopt(struct sock *sk, int level,
index 23160d2b3f711348678a8b161eae12cf9715d8ff..3a00512addbc41143bc339804059c46b41540909 100644 (file)
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -645,6 +645,8 @@ static int ping_v4_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh,
 {
        struct sk_buff *skb = skb_peek(&sk->sk_write_queue);
 
+       if (!skb)
+               return 0;
        pfh->wcheck = csum_partial((char *)&pfh->icmph,
                sizeof(struct icmphdr), pfh->wcheck);
        pfh->icmph.checksum = csum_fold(pfh->wcheck);
index 69daa81736f60b9639358ee6151874a6e0bfbf7e..600dcda840d155e8d94fd6883c68dc774390bbbb 100644 (file)
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -783,6 +783,12 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
                                ret = -EAGAIN;
                                break;
                        }
+                       /* if __tcp_splice_read() got nothing while we have
+                        * an skb in receive queue, we do not want to loop.
+                        * This might happen with URG data.
+                        */
+                       if (!skb_queue_empty(&sk->sk_receive_queue))
+                               break;
                        sk_wait_data(sk, &timeo, NULL);
                        if (signal_pending(current)) {
                                ret = sock_intr_errno(timeo);
index 0795647e94c601d47245fd3ea610de8841b92af7..de95714d021c35e93f8ed52b7c96779e6e40cf6e 100644 (file)
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2383,9 +2383,11 @@ u32 __tcp_select_window(struct sock *sk)
        int full_space = min_t(int, tp->window_clamp, allowed_space);
        int window;
 
-       if (mss > full_space)
+       if (unlikely(mss > full_space)) {
                mss = full_space;
-
+               if (mss <= 0)
+                       return 0;
+       }
        if (free_space < (full_space >> 1)) {
                icsk->icsk_ack.quick = 0;
 
index 17430f3410737fc1ae3fd0bd3375d18017454772..e89135828c3d4af5cc3f8ed94548fabfa8e7af53 100644 (file)
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -55,6 +55,7 @@
 #include <net/ip6_fib.h>
 #include <net/ip6_route.h>
 #include <net/ip6_tunnel.h>
+#include <net/gre.h>
 
 
 static bool log_ecn_error = true;
@@ -367,35 +368,37 @@ static void ip6gre_tunnel_uninit(struct net_device *dev)
 
 
 static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-               u8 type, u8 code, int offset, __be32 info)
+                      u8 type, u8 code, int offset, __be32 info)
 {
-       const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
-       __be16 *p = (__be16 *)(skb->data + offset);
-       int grehlen = offset + 4;
+       const struct gre_base_hdr *greh;
+       const struct ipv6hdr *ipv6h;
+       int grehlen = sizeof(*greh);
        struct ip6_tnl *t;
+       int key_off = 0;
        __be16 flags;
+       __be32 key;
 
-       flags = p[0];
-       if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
-               if (flags&(GRE_VERSION|GRE_ROUTING))
-                       return;
-               if (flags&GRE_KEY) {
-                       grehlen += 4;
-                       if (flags&GRE_CSUM)
-                               grehlen += 4;
-               }
+       if (!pskb_may_pull(skb, offset + grehlen))
+               return;
+       greh = (const struct gre_base_hdr *)(skb->data + offset);
+       flags = greh->flags;
+       if (flags & (GRE_VERSION | GRE_ROUTING))
+               return;
+       if (flags & GRE_CSUM)
+               grehlen += 4;
+       if (flags & GRE_KEY) {
+               key_off = grehlen + offset;
+               grehlen += 4;
        }
 
-       /* If only 8 bytes returned, keyed message will be dropped here */
-       if (!pskb_may_pull(skb, grehlen))
+       if (!pskb_may_pull(skb, offset + grehlen))
                return;
        ipv6h = (const struct ipv6hdr *)skb->data;
-       p = (__be16 *)(skb->data + offset);
+       greh = (const struct gre_base_hdr *)(skb->data + offset);
+       key = key_off ? *(__be32 *)(skb->data + key_off) : 0;
 
        t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
-                               flags & GRE_KEY ?
-                               *(((__be32 *)p) + (grehlen / 4) - 1) : 0,
-                               p[1]);
+                                key, greh->protocol);
        if (!t)
                return;
 
index 2994d1f1a6616070acb17598ebdf7e129634b216..6c6161763c2f73626823111610a07f6ad059460c 100644 (file)
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -479,18 +479,19 @@ ip6_tnl_dev_uninit(struct net_device *dev)
 
 __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
 {
-       const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw;
-       __u8 nexthdr = ipv6h->nexthdr;
-       __u16 off = sizeof(*ipv6h);
+       const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
+       unsigned int nhoff = raw - skb->data;
+       unsigned int off = nhoff + sizeof(*ipv6h);
+       u8 next, nexthdr = ipv6h->nexthdr;
 
        while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
-               __u16 optlen = 0;
                struct ipv6_opt_hdr *hdr;
-               if (raw + off + sizeof(*hdr) > skb->data &&
-                   !pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr)))
+               u16 optlen;
+
+               if (!pskb_may_pull(skb, off + sizeof(*hdr)))
                        break;
 
-               hdr = (struct ipv6_opt_hdr *) (raw + off);
+               hdr = (struct ipv6_opt_hdr *)(skb->data + off);
                if (nexthdr == NEXTHDR_FRAGMENT) {
                        struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
                        if (frag_hdr->frag_off)
@@ -501,20 +502,29 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
                } else {
                        optlen = ipv6_optlen(hdr);
                }
+               /* cache hdr->nexthdr, since pskb_may_pull() might
+                * invalidate hdr
+                */
+               next = hdr->nexthdr;
                if (nexthdr == NEXTHDR_DEST) {
-                       __u16 i = off + 2;
+                       u16 i = 2;
+
+                       /* Remember : hdr is no longer valid at this point. */
+                       if (!pskb_may_pull(skb, off + optlen))
+                               break;
+
                        while (1) {
                                struct ipv6_tlv_tnl_enc_lim *tel;
 
                                /* No more room for encapsulation limit */
-                               if (i + sizeof (*tel) > off + optlen)
+                               if (i + sizeof(*tel) > optlen)
                                        break;
 
-                               tel = (struct ipv6_tlv_tnl_enc_lim *) &raw[i];
+                               tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
                                /* return index of option if found and valid */
                                if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
                                    tel->length == 1)
-                                       return i;
+                                       return i + off - nhoff;
                                /* else jump to next option */
                                if (tel->type)
                                        i += tel->length + 2;
@@ -522,7 +532,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
                                        i++;
                        }
                }
-               nexthdr = hdr->nexthdr;
+               nexthdr = next;
                off += optlen;
        }
        return 0;
index 3da2b16356eb64a9ff33fdc95809a768a81917f6..184f0fe35dc66569c836eca8cf3d048ccf218cb4 100644 (file)
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1389,6 +1389,7 @@ static int ipip6_tunnel_init(struct net_device *dev)
        tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst);
        if (!tunnel->dst_cache) {
                free_percpu(dev->tstats);
+               dev->tstats = NULL;
                return -ENOMEM;
        }
 
index 5f581616bf6a068269587077879cf56b7e637cb5..76a8c8057a2333944e667daea3b12805a4a3f2ca 100644 (file)
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -974,6 +974,16 @@ drop:
        return 0; /* don't send reset */
 }
 
+static void tcp_v6_restore_cb(struct sk_buff *skb)
+{
+       /* We need to move header back to the beginning if xfrm6_policy_check()
+        * and tcp_v6_fill_cb() are going to be called again.
+        * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
+        */
+       memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
+               sizeof(struct inet6_skb_parm));
+}
+
 static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
                                         struct request_sock *req,
                                         struct dst_entry *dst,
@@ -1163,8 +1173,10 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
                                                      sk_gfp_atomic(sk, GFP_ATOMIC));
                        consume_skb(ireq->pktopts);
                        ireq->pktopts = NULL;
-                       if (newnp->pktoptions)
+                       if (newnp->pktoptions) {
+                               tcp_v6_restore_cb(newnp->pktoptions);
                                skb_set_owner_r(newnp->pktoptions, newsk);
+                       }
                }
        }
 
@@ -1179,16 +1191,6 @@ out:
        return NULL;
 }
 
-static void tcp_v6_restore_cb(struct sk_buff *skb)
-{
-       /* We need to move header back to the beginning if xfrm6_policy_check()
-        * and tcp_v6_fill_cb() are going to be called again.
-        * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
-        */
-       memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
-               sizeof(struct inet6_skb_parm));
-}
-
 /* The socket must have it's spinlock held when we get
  * here, unless it is a TCP_LISTEN socket.
  *
index 5871537af387b2566c8ffaa79d107efaf3701fae..763e8e241ce3ec9107e0ad03e21393596d854e2a 100644 (file)
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -273,6 +273,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb,
 int l2tp_nl_register_ops(enum l2tp_pwtype pw_type,
                         const struct l2tp_nl_cmd_ops *ops);
 void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
+int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg);
 
 /* Session reference counts. Incremented when code obtains a reference
  * to a session.
index d0e906d3964281d4cbcd4a9c2d97b25d6eb85b80..445b7cd0826ae11b47513dd66cb659419e5b7b3a 100644 (file)
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -11,6 +11,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <asm/ioctls.h>
 #include <linux/icmp.h>
 #include <linux/module.h>
 #include <linux/skbuff.h>
@@ -555,6 +556,30 @@ out:
        return err ? err : copied;
 }
 
+int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg)
+{
+       struct sk_buff *skb;
+       int amount;
+
+       switch (cmd) {
+       case SIOCOUTQ:
+               amount = sk_wmem_alloc_get(sk);
+               break;
+       case SIOCINQ:
+               spin_lock_bh(&sk->sk_receive_queue.lock);
+               skb = skb_peek(&sk->sk_receive_queue);
+               amount = skb ? skb->len : 0;
+               spin_unlock_bh(&sk->sk_receive_queue.lock);
+               break;
+
+       default:
+               return -ENOIOCTLCMD;
+       }
+
+       return put_user(amount, (int __user *)arg);
+}
+EXPORT_SYMBOL(l2tp_ioctl);
+
 static struct proto l2tp_ip_prot = {
        .name              = "L2TP/IP",
        .owner             = THIS_MODULE,
@@ -563,7 +588,7 @@ static struct proto l2tp_ip_prot = {
        .bind              = l2tp_ip_bind,
        .connect           = l2tp_ip_connect,
        .disconnect        = l2tp_ip_disconnect,
-       .ioctl             = udp_ioctl,
+       .ioctl             = l2tp_ioctl,
        .destroy           = l2tp_ip_destroy_sock,
        .setsockopt        = ip_setsockopt,
        .getsockopt        = ip_getsockopt,
index 3c4f867d3633144f38c2ddc5783dd0fac23832f8..bcdab1cba7738ff80c872071c70dc2762966e9b8 100644 (file)
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -714,7 +714,7 @@ static struct proto l2tp_ip6_prot = {
        .bind              = l2tp_ip6_bind,
        .connect           = l2tp_ip6_connect,
        .disconnect        = l2tp_ip6_disconnect,
-       .ioctl             = udp_ioctl,
+       .ioctl             = l2tp_ioctl,
        .destroy           = l2tp_ip6_destroy_sock,
        .setsockopt        = ipv6_setsockopt,
        .getsockopt        = ipv6_getsockopt,
index f223d1c80ccf7f9251dce0f8f31f58c766576946..f2d28ed74a0a1bdab627f8753fab800d31f10453 100644 (file)
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2637,7 +2637,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
        int vnet_hdr_len;
        struct packet_sock *po = pkt_sk(sk);
        unsigned short gso_type = 0;
-       int hlen, tlen;
+       int hlen, tlen, linear;
        int extra_len = 0;
        ssize_t n;
 
@@ -2741,8 +2741,9 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
        err = -ENOBUFS;
        hlen = LL_RESERVED_SPACE(dev);
        tlen = dev->needed_tailroom;
-       skb = packet_alloc_skb(sk, hlen + tlen, hlen, len,
-                              __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len),
+       linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
+       linear = max(linear, min_t(int, len, dev->hard_header_len));
+       skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
                               msg->msg_flags & MSG_DONTWAIT, &err);
        if (skb == NULL)
                goto out_unlock;
index b5fd4ab56156b09c33ec33b72125c34e221cac54..138f2d6672122416b4bc59ad9fc11260474cfd31 100644 (file)
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -6960,7 +6960,8 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
                 */
                release_sock(sk);
                current_timeo = schedule_timeout(current_timeo);
-               BUG_ON(sk != asoc->base.sk);
+               if (sk != asoc->base.sk)
+                       goto do_error;
                lock_sock(sk);
 
                *timeo_p = current_timeo;