virtio-net: make all RX paths handle errors consistently
author Michael S. Tsirkin <mst@redhat.com>
Thu, 26 Dec 2013 13:32:51 +0000 (15:32 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 15 Jan 2014 23:28:49 +0000 (15:28 -0800)
receive_mergeable() now handles errors internally.
Do the same for the big and small packet paths;
otherwise the logic is too hard to follow.
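
For illustration, a minimal stand-alone C sketch of the control flow this
change establishes: every receive path returns NULL on failure and cleans
up after itself, so the caller makes a single error check. The names and
types below are stand-ins for this sketch, not the driver's.

    #include <stdio.h>

    struct pkt { int len; };

    /* Stand-ins for the three receive paths. Each returns NULL on
     * failure; in the driver, a failing path also releases its own
     * buffer (e.g. give_pages()) before returning NULL. */
    static struct pkt *rx_small(struct pkt *buf)     { return buf; }
    static struct pkt *rx_big(struct pkt *buf)       { return buf->len ? buf : NULL; }
    static struct pkt *rx_mergeable(struct pkt *buf) { return buf->len ? buf : NULL; }

    static void receive(struct pkt *buf, int mergeable, int big)
    {
            struct pkt *skb;

            if (mergeable)
                    skb = rx_mergeable(buf);
            else if (big)
                    skb = rx_big(buf);
            else
                    skb = rx_small(buf);

            if (!skb)       /* single error check covers every path */
                    return;

            printf("delivering packet, len=%d\n", skb->len);
    }

    int main(void)
    {
            struct pkt good = { 64 }, bad = { 0 };

            receive(&good, 1, 0);   /* mergeable path: delivered */
            receive(&bad,  0, 1);   /* big path: dropped, no special-casing */
            return 0;
    }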

Cc: Jason Wang <jasowang@redhat.com>
Cc: David S. Miller <davem@davemloft.net>
Acked-by: Michael Dalton <mwdalton@google.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
(cherry picked from commit f121159d72091f25afb22007c833e60a6845e912)
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/net/virtio_net.c

index ad9b385e1f3d6cd4feee09c565b360a0bebae3a3..ec0e9f236ff0446896a3a54f81693c08ec9228ad 100644
@@ -294,6 +294,34 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
        return skb;
 }
 
+static struct sk_buff *receive_small(void *buf, unsigned int len)
+{
+       struct sk_buff *skb = buf;
+
+       len -= sizeof(struct virtio_net_hdr);
+       skb_trim(skb, len);
+
+       return skb;
+}
+
+static struct sk_buff *receive_big(struct net_device *dev,
+                                  struct receive_queue *rq,
+                                  void *buf, unsigned int len)
+{
+       struct page *page = buf;
+       struct sk_buff *skb = page_to_skb(rq, page, len);
+
+       if (unlikely(!skb))
+               goto err;
+
+       return skb;
+
+err:
+       dev->stats.rx_dropped++;
+       give_pages(rq, page);
+       return NULL;
+}
+
 static struct sk_buff *receive_mergeable(struct net_device *dev,
                                         struct receive_queue *rq,
                                         void *buf,
@@ -357,7 +385,6 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
        struct net_device *dev = vi->dev;
        struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
        struct sk_buff *skb;
-       struct page *page;
        struct skb_vnet_hdr *hdr;
 
        if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
@@ -369,26 +396,15 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
                        dev_kfree_skb(buf);
                return;
        }
+       if (vi->mergeable_rx_bufs)
+               skb = receive_mergeable(dev, rq, buf, len);
+       else if (vi->big_packets)
+               skb = receive_big(dev, rq, buf, len);
+       else
+               skb = receive_small(buf, len);
 
-       if (!vi->mergeable_rx_bufs && !vi->big_packets) {
-               skb = buf;
-               len -= sizeof(struct virtio_net_hdr);
-               skb_trim(skb, len);
-       } else {
-               page = buf;
-               if (vi->mergeable_rx_bufs) {
-                       skb = receive_mergeable(dev, rq, page, len);
-                       if (unlikely(!skb))
-                               return;
-               } else {
-                       skb = page_to_skb(rq, page, len);
-                       if (unlikely(!skb)) {
-                               dev->stats.rx_dropped++;
-                               give_pages(rq, page);
-                               return;
-                       }
-               }
-       }
+       if (unlikely(!skb))
+               return;
 
        hdr = skb_vnet_hdr(skb);