udp: multicast RX should increment SNMP/sk_drops counters on allocation failure
authorEric Dumazet <eric.dumazet@gmail.com>
Sun, 8 Nov 2009 10:20:19 +0000 (10:20 +0000)
committerDavid S. Miller <davem@davemloft.net>
Mon, 9 Nov 2009 04:53:10 +0000 (20:53 -0800)
When skb_clone() fails, we should increment sk_drops and SNMP counters.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/ipv4/udp.c
net/ipv6/udp.c

index 9d9072c6cce7a5591ff36c0b9f11cc5a30bc625c..d73e9170536be12bee0efc3bdd3aad4150c448cf 100644 (file)
@@ -1335,12 +1335,22 @@ static void flush_stack(struct sock **stack, unsigned int count,
 {
        unsigned int i;
        struct sk_buff *skb1 = NULL;
+       struct sock *sk;
 
        for (i = 0; i < count; i++) {
+               sk = stack[i];
                if (likely(skb1 == NULL))
                        skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
 
-               if (skb1 && udp_queue_rcv_skb(stack[i], skb1) <= 0)
+               if (!skb1) {
+                       atomic_inc(&sk->sk_drops);
+                       UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
+                                        IS_UDPLITE(sk));
+                       UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
+                                        IS_UDPLITE(sk));
+               }
+
+               if (skb1 && udp_queue_rcv_skb(sk, skb1) <= 0)
                        skb1 = NULL;
        }
        if (unlikely(skb1))
index 948e823d70c26a2062eda78501e8f8d4c7f123e6..2915e1dad726b17ae5bb114328b033481d79dd24 100644 (file)
@@ -579,14 +579,20 @@ static void flush_stack(struct sock **stack, unsigned int count,
        for (i = 0; i < count; i++) {
                skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
 
+               sk = stack[i];
                if (skb1) {
-                       sk = stack[i];
                        bh_lock_sock(sk);
                        if (!sock_owned_by_user(sk))
                                udpv6_queue_rcv_skb(sk, skb1);
                        else
                                sk_add_backlog(sk, skb1);
                        bh_unlock_sock(sk);
+               } else {
+                       atomic_inc(&sk->sk_drops);
+                       UDP6_INC_STATS_BH(sock_net(sk),
+                                       UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
+                       UDP6_INC_STATS_BH(sock_net(sk),
+                                       UDP_MIB_INERRORS, IS_UDPLITE(sk));
                }
        }
 }