Merge remote branch 'wireless-next/master' into ath6kl-next
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 0cb86ceb652ff66432ba584fedef8231d94decc6..c8d28c433b2b0dc958f7bdebaa77f2b899dfd22e 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -138,6 +138,14 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
 }
 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
 
+static int tcp_repair_connect(struct sock *sk)
+{
+       tcp_connect_init(sk);
+       tcp_finish_connect(sk, NULL);
+
+       return 0;
+}
+
 /* This will initiate an outgoing connection. */
 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
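
The new tcp_repair_connect() helper drives a socket straight to the
established state without emitting a SYN: tcp_connect_init() prepares the
transmit state and tcp_finish_connect() completes the transition. This
serves checkpoint/restore tools via the TCP_REPAIR socket option introduced
elsewhere in this series. A minimal userspace sketch of the intended flow
(the sockopt value is an assumption here; error handling omitted):

	#include <netinet/in.h>
	#include <netinet/tcp.h>
	#include <sys/socket.h>

	#ifndef TCP_REPAIR
	#define TCP_REPAIR 19	/* value assumed from the repair series */
	#endif

	static int restore_connection(const struct sockaddr_in *dst)
	{
		int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
		int on = 1, off = 0;

		/* Enter repair mode: connect() below takes the repair
		 * path instead of performing a three-way handshake. */
		setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &on, sizeof(on));

		/* ... restore sequence numbers and queues here ... */

		connect(fd, (const struct sockaddr *)dst, sizeof(*dst));

		/* Leave repair mode; the socket is now ESTABLISHED. */
		setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &off, sizeof(off));
		return fd;
	}
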
@@ -196,7 +204,8 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                /* Reset inherited state */
                tp->rx_opt.ts_recent       = 0;
                tp->rx_opt.ts_recent_stamp = 0;
-               tp->write_seq              = 0;
+               if (likely(!tp->repair))
+                       tp->write_seq      = 0;
        }
 
        if (tcp_death_row.sysctl_tw_recycle &&
@@ -247,7 +256,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        sk->sk_gso_type = SKB_GSO_TCPV4;
        sk_setup_caps(sk, &rt->dst);
 
-       if (!tp->write_seq)
+       if (!tp->write_seq && likely(!tp->repair))
                tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
                                                           inet->inet_daddr,
                                                           inet->inet_sport,
@@ -255,7 +264,11 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 
        inet->inet_id = tp->write_seq ^ jiffies;
 
-       err = tcp_connect(sk);
+       if (likely(!tp->repair))
+               err = tcp_connect(sk);
+       else
+               err = tcp_repair_connect(sk);
+
        rt = NULL;
        if (err)
                goto failure;
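
With tp->repair set, write_seq is left untouched in both places above, so a
sequence number restored from a checkpoint survives into the new connection
(and still seeds inet_id). A hedged sketch of how that number would be
installed before calling connect(), extending the sketch above with the
TCP_REPAIR_QUEUE/TCP_QUEUE_SEQ companions from the same series (names and
values are assumptions here):

	#include <netinet/in.h>
	#include <sys/socket.h>

	#ifndef TCP_REPAIR_QUEUE
	#define TCP_REPAIR_QUEUE 20	/* assumed */
	#define TCP_QUEUE_SEQ	 21	/* assumed */
	#define TCP_SEND_QUEUE	 2	/* assumed */
	#endif

	static void restore_send_seq(int fd, unsigned int saved_write_seq)
	{
		int q = TCP_SEND_QUEUE;

		/* Select the send queue, then install the saved sequence
		 * number; must run while the socket is in repair mode. */
		setsockopt(fd, IPPROTO_TCP, TCP_REPAIR_QUEUE,
			   &q, sizeof(q));
		setsockopt(fd, IPPROTO_TCP, TCP_QUEUE_SEQ,
			   &saved_write_seq, sizeof(saved_write_seq));
	}
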
@@ -811,7 +824,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
  */
 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
                              struct request_sock *req,
-                             struct request_values *rvp)
+                             struct request_values *rvp,
+                             u16 queue_mapping)
 {
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct flowi4 fl4;
@@ -827,6 +841,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
        if (skb) {
                __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
 
+               skb_set_queue_mapping(skb, queue_mapping);
                err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
                                            ireq->rmt_addr,
                                            ireq->opt);
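
The new queue_mapping parameter threads the receive-queue id of the incoming
SYN through to the SYN-ACK, so on multiqueue NICs both directions of the
handshake stay on one queue (the caller passes skb_get_queue_mapping(skb),
visible further down in this diff). The record-and-replay pattern in
isolation, as a sketch:

	#include <linux/skbuff.h>

	/* Transmit the reply on whatever queue the request arrived on,
	 * keeping the flow on a single TX/RX queue pair. */
	static void reply_on_same_queue(struct sk_buff *reply,
					const struct sk_buff *request)
	{
		skb_set_queue_mapping(reply, skb_get_queue_mapping(request));
	}
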
@@ -841,7 +856,7 @@ static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
                              struct request_values *rvp)
 {
        TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
-       return tcp_v4_send_synack(sk, NULL, req, rvp);
+       return tcp_v4_send_synack(sk, NULL, req, rvp, 0);
 }
 
 /*
@@ -853,14 +868,14 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
 }
 
 /*
- * Return 1 if a syncookie should be sent
+ * Return true if a syncookie should be sent
  */
-int tcp_syn_flood_action(struct sock *sk,
+bool tcp_syn_flood_action(struct sock *sk,
                         const struct sk_buff *skb,
                         const char *proto)
 {
        const char *msg = "Dropping request";
-       int want_cookie = 0;
+       bool want_cookie = false;
        struct listen_sock *lopt;
 
 
@@ -868,7 +883,7 @@ int tcp_syn_flood_action(struct sock *sk,
 #ifdef CONFIG_SYN_COOKIES
        if (sysctl_tcp_syncookies) {
                msg = "Sending cookies";
-               want_cookie = 1;
+               want_cookie = true;
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
        } else
 #endif
@@ -1183,7 +1198,7 @@ clear_hash_noput:
 }
 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
 
-static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
+static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 {
        /*
         * This gets called for each TCP segment that arrives
@@ -1206,16 +1221,16 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 
        /* We've parsed the options - do we have a hash? */
        if (!hash_expected && !hash_location)
-               return 0;
+               return false;
 
        if (hash_expected && !hash_location) {
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
-               return 1;
+               return true;
        }
 
        if (!hash_expected && hash_location) {
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
-               return 1;
+               return true;
        }
 
        /* Okay, so this is hash_expected and hash_location -
@@ -1226,15 +1241,14 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
                                      NULL, NULL, skb);
 
        if (genhash || memcmp(hash_location, newhash, 16) != 0) {
-               if (net_ratelimit()) {
-                       pr_info("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
-                               &iph->saddr, ntohs(th->source),
-                               &iph->daddr, ntohs(th->dest),
-                               genhash ? " tcp_v4_calc_md5_hash failed" : "");
-               }
-               return 1;
+               net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
+                                    &iph->saddr, ntohs(th->source),
+                                    &iph->daddr, ntohs(th->dest),
+                                    genhash ? " tcp_v4_calc_md5_hash failed"
+                                    : "");
+               return true;
        }
-       return 0;
+       return false;
 }
 
 #endif
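
tcp_v4_inbound_md5_hash() above only fires for peers that have a key
configured with the TCP_MD5SIG socket option (RFC 2385 signatures, as used
between BGP speakers). A minimal configuration sketch so those paths are
reachable (the peer address and key below are placeholders):

	#include <netinet/in.h>
	#include <netinet/tcp.h>
	#include <string.h>
	#include <sys/socket.h>

	static int add_md5_peer(int fd, const struct sockaddr_in *peer)
	{
		struct tcp_md5sig md5;

		memset(&md5, 0, sizeof(md5));
		memcpy(&md5.tcpm_addr, peer, sizeof(*peer));
		md5.tcpm_keylen = 6;
		memcpy(md5.tcpm_key, "secret", 6);	/* placeholder key */

		/* Segments from this peer must now carry a valid MD5
		 * signature option or they are dropped by the check above. */
		return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG,
				  &md5, sizeof(md5));
	}
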
@@ -1268,7 +1282,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
        __be32 saddr = ip_hdr(skb)->saddr;
        __be32 daddr = ip_hdr(skb)->daddr;
        __u32 isn = TCP_SKB_CB(skb)->when;
-       int want_cookie = 0;
+       bool want_cookie = false;
 
 	/* Never answer SYNs sent to broadcast or multicast */
        if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -1327,7 +1341,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
                while (l-- > 0)
                        *c++ ^= *hash_location++;
 
-               want_cookie = 0;        /* not our kind of cookie */
+               want_cookie = false;    /* not our kind of cookie */
                tmp_ext.cookie_out_never = 0; /* false */
                tmp_ext.cookie_plus = tmp_opt.cookie_plus;
        } else if (!tp->rx_opt.cookie_in_always) {
@@ -1355,7 +1369,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
                goto drop_and_free;
 
        if (!want_cookie || tmp_opt.tstamp_ok)
-               TCP_ECN_create_request(req, tcp_hdr(skb));
+               TCP_ECN_create_request(req, skb);
 
        if (want_cookie) {
                isn = cookie_v4_init_sequence(sk, skb, &req->mss);
@@ -1410,7 +1424,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
        tcp_rsk(req)->snt_synack = tcp_time_stamp;
 
        if (tcp_v4_send_synack(sk, dst, req,
-                              (struct request_values *)&tmp_ext) ||
+                              (struct request_values *)&tmp_ext,
+                              skb_get_queue_mapping(skb)) ||
            want_cookie)
                goto drop_and_free;
 
@@ -1739,7 +1754,8 @@ process:
                        if (!tcp_prequeue(sk, skb))
                                ret = tcp_v4_do_rcv(sk, skb);
                }
-       } else if (unlikely(sk_add_backlog(sk, skb))) {
+       } else if (unlikely(sk_add_backlog(sk, skb,
+                                          sk->sk_rcvbuf + sk->sk_sndbuf))) {
                bh_unlock_sock(sk);
                NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
                goto discard_and_relse;
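
The backlog (segments queued while a process owns the socket lock) is now
bounded by the socket's combined receive and send budget instead of growing
without limit, with overflow counted as TCPBacklogDrop. Since that budget
follows the ordinary buffer sockopts, it can be widened per socket; a
sketch (note the kernel doubles the requested value and caps it at
net.core.rmem_max/wmem_max):

	#include <sys/socket.h>

	static void widen_backlog_budget(int fd, int bytes)
	{
		/* Raises sk_rcvbuf/sk_sndbuf, and with them the
		 * sk_add_backlog() limit used above. */
		setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &bytes, sizeof(bytes));
		setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &bytes, sizeof(bytes));
	}
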
@@ -1875,64 +1891,15 @@ static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
 static int tcp_v4_init_sock(struct sock *sk)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
-       struct tcp_sock *tp = tcp_sk(sk);
-
-       skb_queue_head_init(&tp->out_of_order_queue);
-       tcp_init_xmit_timers(sk);
-       tcp_prequeue_init(tp);
-
-       icsk->icsk_rto = TCP_TIMEOUT_INIT;
-       tp->mdev = TCP_TIMEOUT_INIT;
-
-       /* So many TCP implementations out there (incorrectly) count the
-        * initial SYN frame in their delayed-ACK and congestion control
-        * algorithms that we must have the following bandaid to talk
-        * efficiently to them.  -DaveM
-        */
-       tp->snd_cwnd = TCP_INIT_CWND;
-
-       /* See draft-stevens-tcpca-spec-01 for discussion of the
-        * initialization of these values.
-        */
-       tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
-       tp->snd_cwnd_clamp = ~0;
-       tp->mss_cache = TCP_MSS_DEFAULT;
 
-       tp->reordering = sysctl_tcp_reordering;
-       icsk->icsk_ca_ops = &tcp_init_congestion_ops;
-
-       sk->sk_state = TCP_CLOSE;
-
-       sk->sk_write_space = sk_stream_write_space;
-       sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
+       tcp_init_sock(sk);
 
        icsk->icsk_af_ops = &ipv4_specific;
-       icsk->icsk_sync_mss = tcp_sync_mss;
+
 #ifdef CONFIG_TCP_MD5SIG
-       tp->af_specific = &tcp_sock_ipv4_specific;
+       tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
 #endif
 
-       /* TCP Cookie Transactions */
-       if (sysctl_tcp_cookie_size > 0) {
-               /* Default, cookies without s_data_payload. */
-               tp->cookie_values =
-                       kzalloc(sizeof(*tp->cookie_values),
-                               sk->sk_allocation);
-               if (tp->cookie_values != NULL)
-                       kref_init(&tp->cookie_values->kref);
-       }
-       /* Presumed zeroed, in order of appearance:
-        *      cookie_in_always, cookie_out_never,
-        *      s_data_constant, s_data_in, s_data_out
-        */
-       sk->sk_sndbuf = sysctl_tcp_wmem[1];
-       sk->sk_rcvbuf = sysctl_tcp_rmem[1];
-
-       local_bh_disable();
-       sock_update_memcg(sk);
-       sk_sockets_allocated_inc(sk);
-       local_bh_enable();
-
        return 0;
 }
 
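
The deleted block was generic TCP initialization duplicated between the IPv4
and IPv6 paths; it moves into a shared tcp_init_sock() so that only the
address-family specifics remain here. Presumably the IPv6 side shrinks the
same way (a sketch of the expected shape, not the verbatim tree contents):

	static int tcp_v6_init_sock(struct sock *sk)
	{
		struct inet_connection_sock *icsk = inet_csk(sk);

		tcp_init_sock(sk);

		icsk->icsk_af_ops = &ipv6_specific;

	#ifdef CONFIG_TCP_MD5SIG
		tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
	#endif

		return 0;
	}
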
@@ -2109,7 +2076,7 @@ static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
        return rc;
 }
 
-static inline int empty_bucket(struct tcp_iter_state *st)
+static inline bool empty_bucket(struct tcp_iter_state *st)
 {
        return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
                hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);