else if (!ret) {
if (spliced)
break;
- if (flags & SPLICE_F_NONBLOCK) {
- ret = -EAGAIN;
- break;
- }
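/* The early -EAGAIN return for SPLICE_F_NONBLOCK is presumably dropped
 * so that EOF (SOCK_DONE) and pending socket errors below are no longer
 * masked; a nonblocking caller still gets -EAGAIN from the !timeo check
 * later in the loop. */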
if (sock_flag(sk, SOCK_DONE))
break;
if (sk->sk_err) {
	ret = sock_error(sk);
	break;
}
if ((available < target) &&
(len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
!sysctl_tcp_low_latency &&
- __get_cpu_var(softnet_data).net_dma) {
+ dma_find_channel(DMA_MEMCPY)) {
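/* With the dmaengine rework the per-CPU softnet_data.net_dma pointer is
 * gone; dma_find_channel(DMA_MEMCPY) returns this CPU's public memcpy
 * channel from the dmaengine channel table instead. */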
preempt_enable_no_resched();
tp->ucopy.pinned_list =
dma_pin_iovec_pages(msg->msg_iov, len);
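/* The user iovec's pages are pinned up front so the DMA engine can copy
 * received data into them asynchronously while the socket keeps being
 * drained. */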
if (!(flags & MSG_TRUNC)) {
#ifdef CONFIG_NET_DMA
if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
- tp->ucopy.dma_chan = get_softnet_dma();
+ tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
if (tp->ucopy.dma_chan) {
tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
	tp->ucopy.dma_chan, skb, offset,
	msg->msg_iov, used,
	tp->ucopy.pinned_list);
/* Safe to free early-copied skbs now */
__skb_queue_purge(&sk->sk_async_wait_queue);
- dma_chan_put(tp->ucopy.dma_chan);
tp->ucopy.dma_chan = NULL;
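/* dma_find_channel() takes no reference on the channel, so the old
 * dma_chan_put() balance goes away; clearing the pointer is enough. */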
}
if (tp->ucopy.pinned_list) {
	dma_unpin_iovec_pages(tp->ucopy.pinned_list);
	tp->ucopy.pinned_list = NULL;
}
state = sk->sk_state;
sock_hold(sk);
sock_orphan(sk);
- percpu_counter_inc(sk->sk_prot->orphan_count);
/* It is the last release_sock in its life. It will remove backlog. */
release_sock(sk);
bh_lock_sock(sk);
WARN_ON(sock_owned_by_user(sk));
+ percpu_counter_inc(sk->sk_prot->orphan_count);
+
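/* percpu_counter operations are not softirq-safe, so the orphan count
 * is only bumped once bh_lock_sock() has disabled BHs; incrementing it
 * before release_sock() could deadlock against a softirq updating the
 * same counter. */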
/* Have we already been destroyed by a softirq or backlog? */
if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
goto out;
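/* In tcp_gro_receive(): any difference in the TCP option bytes between
 * the held packet and the new segment forces a flush instead of a merge. */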
flush |= memcmp(th + 1, th2 + 1, thlen - sizeof(*th));
total = p->len;
- mss = total;
- if (skb_shinfo(p)->frag_list)
- mss = skb_shinfo(p)->frag_list->len;
+ mss = skb_shinfo(p)->gso_size;
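/* GRO tracks the MSS of the held packet in gso_size while merging, so
 * there is no need to infer it from frag_list, which stays empty when
 * segments are merged into page frags rather than chained skbs. */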
flush |= skb->len > mss || skb->len <= 0;
flush |= ntohl(th2->seq) + total != ntohl(th->seq);
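/* A mergeable segment must start exactly where the held data ends:
 * p's starting sequence number plus its accumulated length. */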
return pp;
}
+EXPORT_SYMBOL(tcp_gro_receive);
int tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);

skb->csum_offset = offsetof(struct tcphdr, check);
skb->ip_summed = CHECKSUM_PARTIAL;
- skb_shinfo(skb)->gso_size = skb_shinfo(skb)->frag_list->len;
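/* gso_size is already maintained during merging, and frag_list->len is
 * meaningless (or an oops) for purely paged merges, so the recomputation
 * here is dropped. */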
skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	return 0;
}
+EXPORT_SYMBOL(tcp_gro_complete);
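/* Both GRO handlers are exported presumably so that the (possibly
 * modular) IPv6 code can reuse them for TCPv6. */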
#ifdef CONFIG_TCP_MD5SIG
static unsigned long tcp_md5sig_users;