sctp: use limited socket backlog
Author: Zhu Yi <yi.zhu@intel.com>
Thu, 4 Mar 2010 18:01:44 +0000 (18:01 +0000)
Committer: David S. Miller <davem@davemloft.net>
Fri, 5 Mar 2010 21:34:01 +0000 (13:34 -0800)
Make sctp adapt to the limited socket backlog change.

Cc: Vlad Yasevich <vladislav.yasevich@hp.com>
Cc: Sridhar Samudrala <sri@us.ibm.com>
Signed-off-by: Zhu Yi <yi.zhu@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/sctp/input.c
net/sctp/socket.c

index c0c973e67addd236fa438aca6ca84d3058495ebd..cbc063665e6b26c985df673aee508a5f70390f24 100644 (file)
@@ -75,7 +75,7 @@ static struct sctp_association *__sctp_lookup_association(
                                        const union sctp_addr *peer,
                                        struct sctp_transport **pt);
 
-static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
+static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
 
 
 /* Calculate the SCTP checksum of an SCTP packet.  */
@@ -265,8 +265,13 @@ int sctp_rcv(struct sk_buff *skb)
        }
 
        if (sock_owned_by_user(sk)) {
+               if (sctp_add_backlog(sk, skb)) {
+                       sctp_bh_unlock_sock(sk);
+                       sctp_chunk_free(chunk);
+                       skb = NULL; /* sctp_chunk_free already freed the skb */
+                       goto discard_release;
+               }
                SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG);
-               sctp_add_backlog(sk, skb);
        } else {
                SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_SOFTIRQ);
                sctp_inq_push(&chunk->rcvr->inqueue, chunk);
@@ -336,8 +341,10 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
                sctp_bh_lock_sock(sk);
 
                if (sock_owned_by_user(sk)) {
-                       sk_add_backlog(sk, skb);
-                       backloged = 1;
+                       if (sk_add_backlog_limited(sk, skb))
+                               sctp_chunk_free(chunk);
+                       else
+                               backloged = 1;
                } else
                        sctp_inq_push(inqueue, chunk);
 
@@ -362,22 +369,27 @@ done:
        return 0;
 }
 
-static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
+static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
        struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
        struct sctp_ep_common *rcvr = chunk->rcvr;
+       int ret;
 
-       /* Hold the assoc/ep while hanging on the backlog queue.
-        * This way, we know structures we need will not disappear from us
-        */
-       if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
-               sctp_association_hold(sctp_assoc(rcvr));
-       else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
-               sctp_endpoint_hold(sctp_ep(rcvr));
-       else
-               BUG();
+       ret = sk_add_backlog_limited(sk, skb);
+       if (!ret) {
+               /* Hold the assoc/ep while hanging on the backlog queue.
+                * This way, we know structures we need will not disappear
+                * from us
+                */
+               if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
+                       sctp_association_hold(sctp_assoc(rcvr));
+               else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
+                       sctp_endpoint_hold(sctp_ep(rcvr));
+               else
+                       BUG();
+       }
+       return ret;
 
-       sk_add_backlog(sk, skb);
 }
 
 /* Handle icmp frag needed error. */
index f6d1e59c4151845b9dcfd0dc53428c88fdf079af..dfc5c127efd47d5cfca1a5541d3e91dff898c1b3 100644 (file)
@@ -3720,6 +3720,9 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
        SCTP_DBG_OBJCNT_INC(sock);
        percpu_counter_inc(&sctp_sockets_allocated);
 
+       /* Set socket backlog limit. */
+       sk->sk_backlog.limit = sysctl_sctp_rmem[1];
+
        local_bh_disable();
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
        local_bh_enable();