Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux...
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 019243408623616680d8d74fb085d278b31f6163..ce572f9dff023529e6aaea9dcea9aa9476a9746d 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -580,10 +580,6 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
                else if (!ret) {
                        if (spliced)
                                break;
-                       if (flags & SPLICE_F_NONBLOCK) {
-                               ret = -EAGAIN;
-                               break;
-                       }
                        if (sock_flag(sk, SOCK_DONE))
                                break;
                        if (sk->sk_err) {
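
The hunk above drops the early -EAGAIN return for SPLICE_F_NONBLOCK, so a non-blocking splice from a TCP socket no longer masks EOF or a pending socket error: those conditions are now checked first, and the -EAGAIN fallback further down in tcp_splice_read() handles the no-data case. A minimal user-space sketch of the call pattern this affects (drain_socket() and the 64 KiB chunk size are hypothetical, not from this commit):

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

/* Move up to 64 KiB from a TCP socket into a pipe without blocking. */
static ssize_t drain_socket(int sock_fd, int pipe_wr_fd)
{
	ssize_t n = splice(sock_fd, NULL, pipe_wr_fd, NULL, 65536,
			   SPLICE_F_NONBLOCK | SPLICE_F_MOVE);

	if (n == -1 && errno == EAGAIN)
		return 0;	/* no data yet: poll() and retry */

	return n;		/* > 0: bytes spliced, 0: EOF, -1: real error */
}

With the old code the EAGAIN branch could fire even when the peer had already closed or an error was queued; after this change those cases surface as 0 or -1 respectively.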
@@ -1317,7 +1313,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                if ((available < target) &&
                    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
                    !sysctl_tcp_low_latency &&
-                   __get_cpu_var(softnet_data).net_dma) {
+                   dma_find_channel(DMA_MEMCPY)) {
                        preempt_enable_no_resched();
                        tp->ucopy.pinned_list =
                                        dma_pin_iovec_pages(msg->msg_iov, len);
@@ -1527,7 +1523,7 @@ do_prequeue:
                if (!(flags & MSG_TRUNC)) {
 #ifdef CONFIG_NET_DMA
                        if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-                               tp->ucopy.dma_chan = get_softnet_dma();
+                               tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 
                        if (tp->ucopy.dma_chan) {
                                tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
@@ -1632,7 +1628,6 @@ skip_copy:
 
                /* Safe to free early-copied skbs now */
                __skb_queue_purge(&sk->sk_async_wait_queue);
-               dma_chan_put(tp->ucopy.dma_chan);
                tp->ucopy.dma_chan = NULL;
        }
        if (tp->ucopy.pinned_list) {
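
The three NET_DMA hunks above come from the dmaengine rework that retires the per-CPU softnet_data.net_dma channel: get_softnet_dma() is replaced by dma_find_channel(DMA_MEMCPY), and the per-call dma_chan_put() disappears because channel lifetime is now managed by the dmaengine core for as long as the client stays registered. A minimal sketch of the client pattern this assumes, using the dmaengine_get()/dmaengine_put() hooks from the same rework (example_memcpy_user() is hypothetical):

#include <linux/dmaengine.h>

static void example_memcpy_user(void)
{
	struct dma_chan *chan;

	dmaengine_get();	/* register as a dmaengine client */

	/* Returns a per-CPU cached channel, or NULL if none offers MEMCPY. */
	chan = dma_find_channel(DMA_MEMCPY);
	if (chan) {
		/* issue async copies on chan, then poll for completion */
	} else {
		/* fall back to a synchronous memcpy path */
	}

	dmaengine_put();	/* unregister; channels may now be released */
}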
@@ -1836,7 +1831,6 @@ adjudge_to_death:
        state = sk->sk_state;
        sock_hold(sk);
        sock_orphan(sk);
-       percpu_counter_inc(sk->sk_prot->orphan_count);
 
        /* It is the last release_sock in its life. It will remove backlog. */
        release_sock(sk);
@@ -1849,6 +1843,8 @@ adjudge_to_death:
        bh_lock_sock(sk);
        WARN_ON(sock_owned_by_user(sk));
 
+       percpu_counter_inc(sk->sk_prot->orphan_count);
+
        /* Have we already been destroyed by a softirq or backlog? */
        if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
                goto out;
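
The two hunks above move the percpu_counter_inc() on sk_prot->orphan_count from before release_sock() to after bh_lock_sock(), so the increment always runs with bottom halves disabled; that keeps every update of the counter in the same locking context, since folding a per-CPU delta into the shared total takes the counter's internal spinlock. A hedged sketch of the percpu_counter pattern involved (all example_* names are hypothetical; newer kernels add a gfp_t argument to percpu_counter_init()):

#include <linux/bottom_half.h>
#include <linux/percpu_counter.h>

static struct percpu_counter example_orphans;

static int example_setup(void)
{
	return percpu_counter_init(&example_orphans, 0);
}

static void example_orphan_one(void)
{
	local_bh_disable();	/* match the BH-disabled call sites */
	percpu_counter_inc(&example_orphans);
	local_bh_enable();
}

static s64 example_count(void)
{
	/* Approximate, non-negative sum of the per-CPU deltas. */
	return percpu_counter_read_positive(&example_orphans);
}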
@@ -2465,6 +2461,105 @@ out:
 }
 EXPORT_SYMBOL(tcp_tso_segment);
 
+struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+{
+       struct sk_buff **pp = NULL;
+       struct sk_buff *p;
+       struct tcphdr *th;
+       struct tcphdr *th2;
+       unsigned int thlen;
+       unsigned int flags;
+       unsigned int total;
+       unsigned int mss = 1;
+       int flush = 1;
+
+       if (!pskb_may_pull(skb, sizeof(*th)))
+               goto out;
+
+       th = tcp_hdr(skb);
+       thlen = th->doff * 4;
+       if (thlen < sizeof(*th))
+               goto out;
+
+       if (!pskb_may_pull(skb, thlen))
+               goto out;
+
+       th = tcp_hdr(skb);
+       __skb_pull(skb, thlen);
+
+       flags = tcp_flag_word(th);
+
+       for (; (p = *head); head = &p->next) {
+               if (!NAPI_GRO_CB(p)->same_flow)
+                       continue;
+
+               th2 = tcp_hdr(p);
+
+               if (th->source != th2->source || th->dest != th2->dest) {
+                       NAPI_GRO_CB(p)->same_flow = 0;
+                       continue;
+               }
+
+               goto found;
+       }
+
+       goto out_check_final;
+
+found:
+       flush = NAPI_GRO_CB(p)->flush;
+       flush |= flags & TCP_FLAG_CWR;
+       flush |= (flags ^ tcp_flag_word(th2)) &
+                 ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH);
+       flush |= th->ack_seq != th2->ack_seq || th->window != th2->window;
+       flush |= memcmp(th + 1, th2 + 1, thlen - sizeof(*th));
+
+       total = p->len;
+       mss = skb_shinfo(p)->gso_size;
+
+       flush |= skb->len > mss || skb->len <= 0;
+       flush |= ntohl(th2->seq) + total != ntohl(th->seq);
+
+       if (flush || skb_gro_receive(head, skb)) {
+               mss = 1;
+               goto out_check_final;
+       }
+
+       p = *head;
+       th2 = tcp_hdr(p);
+       tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);
+
+out_check_final:
+       flush = skb->len < mss;
+       flush |= flags & (TCP_FLAG_URG | TCP_FLAG_PSH | TCP_FLAG_RST |
+                         TCP_FLAG_SYN | TCP_FLAG_FIN);
+
+       if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
+               pp = head;
+
+out:
+       NAPI_GRO_CB(skb)->flush |= flush;
+
+       return pp;
+}
+EXPORT_SYMBOL(tcp_gro_receive);
+
+int tcp_gro_complete(struct sk_buff *skb)
+{
+       struct tcphdr *th = tcp_hdr(skb);
+
+       skb->csum_start = skb_transport_header(skb) - skb->head;
+       skb->csum_offset = offsetof(struct tcphdr, check);
+       skb->ip_summed = CHECKSUM_PARTIAL;
+
+       skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+
+       if (th->cwr)
+               skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
+
+       return 0;
+}
+EXPORT_SYMBOL(tcp_gro_complete);
+
 #ifdef CONFIG_TCP_MD5SIG
 static unsigned long tcp_md5sig_users;
 static struct tcp_md5sig_pool **tcp_md5sig_pool;
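
For context on how the two new GRO hooks above get wired up: tcp_gro_receive() only compares ports and merges payloads, leaving address and checksum validation to an address-family wrapper, and tcp_gro_complete() likewise leaves seeding the TCP checksum to its caller. The IPv4 glue added alongside them in the same series looks roughly like the sketches below (cf. tcp4_gro_receive() and tcp4_gro_complete() in net/ipv4/tcp_ipv4.c; illustrative context, not part of this diff):

static struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		/* Verify the pseudo-header checksum before merging. */
		if (!tcp_v4_check(skb->len, iph->saddr, iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}
		/* fall through */
	case CHECKSUM_NONE:
		/* Cannot verify: flush rather than aggregate bad data. */
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

The completion side seeds th->check with the pseudo-header sum so the CHECKSUM_PARTIAL set up by tcp_gro_complete() can be finished by hardware or the software fallback, and tags the skb as TCPv4 GSO before the generic completion fills in the remaining metadata:

static int tcp4_gro_complete(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}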