diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index c1728771cf89c46a82af0187a02029450adb854b..6ecfc9de599ccede1db44d92e51f4304b34cd09c 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
 
 #include <asm/uaccess.h>
 #include <asm/ioctls.h>
+#include <asm/unaligned.h>
 #include <net/busy_poll.h>
 
 int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
@@ -517,8 +518,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
                        if (sk_stream_is_writeable(sk)) {
                                mask |= POLLOUT | POLLWRNORM;
                        } else {  /* send SIGIO later */
-                               set_bit(SOCK_ASYNC_NOSPACE,
-                                       &sk->sk_socket->flags);
+                               sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
                                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 
                                /* Race breaker. If space is freed after
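
Note: this hunk, and the sk_clear_bit() hunks below, replace direct manipulation
of sk->sk_socket->flags with helpers that go through the RCU-managed socket wait
queue. A minimal sketch of those helpers, assuming the v4.4-era include/net/sock.h
layout (the sk_wq_raw field name is recalled from that upstream series, so treat
it as an approximation):

    /* Set/clear an async-I/O flag via the wait-queue pointer instead of
     * sk->sk_socket, which sock_orphan() may already have detached from
     * under us.
     */
    static inline void sk_set_bit(int nr, struct sock *sk)
    {
            set_bit(nr, &sk->sk_wq_raw->flags);
    }

    static inline void sk_clear_bit(int nr, struct sock *sk)
    {
            clear_bit(nr, &sk->sk_wq_raw->flags);
    }
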
@@ -906,7 +906,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
                        goto out_err;
        }
 
-       clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+       sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
        mss_now = tcp_send_mss(sk, &size_goal, flags);
        copied = 0;
@@ -939,7 +939,7 @@ new_segment:
 
                i = skb_shinfo(skb)->nr_frags;
                can_coalesce = skb_can_coalesce(skb, i, page, offset);
-               if (!can_coalesce && i >= MAX_SKB_FRAGS) {
+               if (!can_coalesce && i >= sysctl_max_skb_frags) {
                        tcp_mark_push(tp, skb);
                        goto new_segment;
                }
@@ -1134,7 +1134,7 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
        }
 
        /* This should be in poll */
-       clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+       sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
        mss_now = tcp_send_mss(sk, &size_goal, flags);
 
@@ -1212,7 +1212,7 @@ new_segment:
 
                        if (!skb_can_coalesce(skb, i, pfrag->page,
                                              pfrag->offset)) {
-                               if (i == MAX_SKB_FRAGS || !sg) {
+                               if (i == sysctl_max_skb_frags || !sg) {
                                        tcp_mark_push(tp, skb);
                                        goto new_segment;
                                }
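
Both MAX_SKB_FRAGS hunks (this one and the one in do_tcp_sendpages() above) swap
the compile-time fragment limit for the runtime tunable net.core.max_skb_frags.
A rough sketch of the knob's declaration and its ctl_table entry, assuming it
follows the usual proc_dointvec_minmax pattern in net/core/sysctl_net_core.c:

    int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
    EXPORT_SYMBOL(sysctl_max_skb_frags);

    static int one = 1;
    static int max_skb_frags = MAX_SKB_FRAGS;

    static struct ctl_table net_core_table[] = {
            /* ... other entries ... */
            {
                    .procname       = "max_skb_frags",
                    .data           = &sysctl_max_skb_frags,
                    .maxlen         = sizeof(int),
                    .mode           = 0644,
                    .proc_handler   = proc_dointvec_minmax,
                    .extra1         = &one,           /* floor: 1 frag */
                    .extra2         = &max_skb_frags, /* ceiling: MAX_SKB_FRAGS */
            },
            /* ... */
    };

Lowering it at runtime (for example `sysctl -w net.core.max_skb_frags=8`) caps
how many page fragments TCP packs into one skb, which can help drivers or
virtual NICs that cope poorly with long frag lists.
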
@@ -2638,6 +2638,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
        const struct inet_connection_sock *icsk = inet_csk(sk);
        u32 now = tcp_time_stamp;
        unsigned int start;
+       u64 rate64;
        u32 rate;
 
        memset(info, 0, sizeof(*info));
@@ -2703,15 +2704,17 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
        info->tcpi_total_retrans = tp->total_retrans;
 
        rate = READ_ONCE(sk->sk_pacing_rate);
-       info->tcpi_pacing_rate = rate != ~0U ? rate : ~0ULL;
+       rate64 = rate != ~0U ? rate : ~0ULL;
+       put_unaligned(rate64, &info->tcpi_pacing_rate);
 
        rate = READ_ONCE(sk->sk_max_pacing_rate);
-       info->tcpi_max_pacing_rate = rate != ~0U ? rate : ~0ULL;
+       rate64 = rate != ~0U ? rate : ~0ULL;
+       put_unaligned(rate64, &info->tcpi_max_pacing_rate);
 
        do {
                start = u64_stats_fetch_begin_irq(&tp->syncp);
-               info->tcpi_bytes_acked = tp->bytes_acked;
-               info->tcpi_bytes_received = tp->bytes_received;
+               put_unaligned(tp->bytes_acked, &info->tcpi_bytes_acked);
+               put_unaligned(tp->bytes_received, &info->tcpi_bytes_received);
        } while (u64_stats_fetch_retry_irq(&tp->syncp, start));
        info->tcpi_segs_out = tp->segs_out;
        info->tcpi_segs_in = tp->segs_in;
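
The put_unaligned() conversions, together with the <asm/unaligned.h> include
added at the top of this diff, account for struct tcp_info being only 32-bit
aligned on some user ABIs: its u64 fields can land on a 4-byte boundary, and a
plain 64-bit store there may trap on strict-alignment architectures. The
surrounding u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() loop is a
seqcount retry that rereads bytes_acked and bytes_received until it observes a
consistent 64-bit snapshot on 32-bit hosts. Conceptually, a u64 put_unaligned()
reduces to a byte-wise store; a minimal sketch of the generic fallback (real
kernels select a per-arch implementation):

    /* Copy the value byte-wise so the destination needs no natural
     * alignment; the compiler lowers this memcpy to safe stores.
     */
    static inline void put_unaligned_u64_sketch(u64 val, void *p)
    {
            memcpy(p, &val, sizeof(val));
    }
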
@@ -3081,6 +3084,52 @@ void tcp_done(struct sock *sk)
 }
 EXPORT_SYMBOL_GPL(tcp_done);
 
+int tcp_abort(struct sock *sk, int err)
+{
+       if (!sk_fullsock(sk)) {
+               if (sk->sk_state == TCP_NEW_SYN_RECV) {
+                       struct request_sock *req = inet_reqsk(sk);
+
+                       local_bh_disable();
+                       inet_csk_reqsk_queue_drop_and_put(req->rsk_listener,
+                                                         req);
+                       local_bh_enable();
+                       return 0;
+               }
+               sock_gen_put(sk);
+               return -EOPNOTSUPP;
+       }
+
+       /* Don't race with userspace socket closes such as tcp_close. */
+       lock_sock(sk);
+
+       if (sk->sk_state == TCP_LISTEN) {
+               tcp_set_state(sk, TCP_CLOSE);
+               inet_csk_listen_stop(sk);
+       }
+
+       /* Don't race with BH socket closes such as inet_csk_listen_stop. */
+       local_bh_disable();
+       bh_lock_sock(sk);
+
+       if (!sock_flag(sk, SOCK_DEAD)) {
+               sk->sk_err = err;
+               /* This barrier is coupled with smp_rmb() in tcp_poll() */
+               smp_wmb();
+               sk->sk_error_report(sk);
+               if (tcp_need_reset(sk->sk_state))
+                       tcp_send_active_reset(sk, GFP_ATOMIC);
+               tcp_done(sk);
+       }
+
+       bh_unlock_sock(sk);
+       local_bh_enable();
+       release_sock(sk);
+       sock_put(sk);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(tcp_abort);
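
tcp_abort() gives the kernel a way to kill an arbitrary TCP socket from outside
the owning process; upstream it backs the inet_diag SOCK_DESTROY operation
(iproute2's `ss -K`). A sketch of how a diag handler reaches it, modeled on the
upstream tcp_diag_destroy() (helper names recalled from that series, so treat
them as approximations):

    static int tcp_diag_destroy(struct sk_buff *in_skb,
                                const struct inet_diag_req_v2 *req)
    {
            struct net *net = sock_net(in_skb->sk);
            /* Look up the socket described by the netlink request. */
            struct sock *sk = inet_diag_find_one_icsk(net, &tcp_hashinfo, req);

            if (IS_ERR(sk))
                    return PTR_ERR(sk);

            /* sock_diag_destroy() dispatches to tcp_abort() above. */
            return sock_diag_destroy(sk, ECONNABORTED);
    }
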
+
 extern struct tcp_congestion_ops tcp_reno;
 
 static __initdata unsigned long thash_entries;