diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 1feb15f23de8c4f673fd0fe713df2dd9195995cf..030cd09dd2a24d2480346a0e19fece73356c8c0e 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -420,7 +420,7 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
                           sk->sk_protocol, inet_sk_flowi_flags(sk),
                           (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
                           ireq->ir_loc_addr, ireq->ir_rmt_port,
-                          htons(ireq->ir_num));
+                          htons(ireq->ir_num), sock_i_uid((struct sock *)sk));
        security_req_classify_flow(req, flowi4_to_flowi(fl4));
        rt = ip_route_output_flow(net, fl4, sk);
        if (IS_ERR(rt))
@@ -457,7 +457,7 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
                           sk->sk_protocol, inet_sk_flowi_flags(sk),
                           (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
                           ireq->ir_loc_addr, ireq->ir_rmt_port,
-                          htons(ireq->ir_num));
+                          htons(ireq->ir_num), sock_i_uid((struct sock *)sk));
        security_req_classify_flow(req, flowi4_to_flowi(fl4));
        rt = ip_route_output_flow(net, fl4, sk);
        if (IS_ERR(rt))
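
The two hunks above make the same change: the flow key built for the route lookup now carries the owning socket's UID, obtained with sock_i_uid() and passed as an extra trailing argument to flowi4_init_output(), so that routing decisions can take the socket owner into account. Below is a minimal userspace sketch of the idea, with hypothetical names (struct flow_key, flow_key_init), not the kernel API:

#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

/* Hypothetical stand-in for struct flowi4: the key used to look up a route. */
struct flow_key {
	unsigned int   dst_addr;
	unsigned int   src_addr;
	unsigned short dst_port;
	unsigned short src_port;
	uid_t uid;	/* new field: owner of the socket doing the lookup */
};

/* Hypothetical stand-in for flowi4_init_output(): the uid is simply threaded
 * through as one more component of the key. */
static void flow_key_init(struct flow_key *key,
			  unsigned int dst_addr, unsigned int src_addr,
			  unsigned short dst_port, unsigned short src_port,
			  uid_t uid)
{
	key->dst_addr = dst_addr;
	key->src_addr = src_addr;
	key->dst_port = dst_port;
	key->src_port = src_port;
	key->uid      = uid;
}

int main(void)
{
	struct flow_key key;

	/* A routing policy could now match on key.uid, e.g. to steer one
	 * user's traffic through a dedicated routing table. */
	flow_key_init(&key, 0x0a000001, 0x0a000002, 80, 51000, getuid());
	printf("flow uid=%u\n", (unsigned int)key.uid);
	return 0;
}
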
@@ -563,7 +563,7 @@ static void reqsk_timer_handler(unsigned long data)
        int max_retries, thresh;
        u8 defer_accept;
 
-       if (sk_listener->sk_state != TCP_LISTEN)
+       if (sk_state_load(sk_listener) != TCP_LISTEN)
                goto drop;
 
        max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
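
This hunk (and the matching sk_state_store() hunk in inet_csk_listen_start() below) replaces plain accesses to sk->sk_state with helpers that read and write the state with acquire/release semantics, because the request-socket timer inspects the listener's state without holding the socket lock. A minimal C11 sketch of the same publish/observe pattern, with hypothetical names (struct listener, listener_publish), not the kernel helpers:

#include <stdatomic.h>
#include <stdio.h>

#define TCP_LISTEN 10	/* numeric value used here only for illustration */

/* Hypothetical listener: the state is published with release semantics and
 * observed with acquire semantics, so a reader that sees TCP_LISTEN also
 * sees every initialisation performed before the state was stored. */
struct listener {
	int port;		/* set up before the state is published */
	_Atomic int sk_state;
};

static void listener_publish(struct listener *lk, int port)
{
	lk->port = port;				/* initialise first */
	atomic_store_explicit(&lk->sk_state, TCP_LISTEN,
			      memory_order_release);	/* then publish */
}

static int listener_is_listening(struct listener *lk)
{
	return atomic_load_explicit(&lk->sk_state,
				    memory_order_acquire) == TCP_LISTEN;
}

int main(void)
{
	struct listener lk = { .sk_state = 0 };

	listener_publish(&lk, 8080);
	if (listener_is_listening(&lk))
		printf("listening on port %d\n", lk.port);
	return 0;
}
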
@@ -749,7 +749,7 @@ int inet_csk_listen_start(struct sock *sk, int backlog)
         * It is OK, because this socket enters to hash table only
         * after validation is complete.
         */
-       sk->sk_state = TCP_LISTEN;
+       sk_state_store(sk, TCP_LISTEN);
        if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
                inet->inet_sport = htons(inet->inet_num);
 
@@ -789,14 +789,16 @@ static void inet_child_forget(struct sock *sk, struct request_sock *req,
        reqsk_put(req);
 }
 
-void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
-                             struct sock *child)
+struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
+                                     struct request_sock *req,
+                                     struct sock *child)
 {
        struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
 
        spin_lock(&queue->rskq_lock);
        if (unlikely(sk->sk_state != TCP_LISTEN)) {
                inet_child_forget(sk, req, child);
+               child = NULL;
        } else {
                req->sk = child;
                req->dl_next = NULL;
@@ -808,6 +810,7 @@ void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
                sk_acceptq_added(sk);
        }
        spin_unlock(&queue->rskq_lock);
+       return child;
 }
 EXPORT_SYMBOL(inet_csk_reqsk_queue_add);
 
@@ -817,11 +820,8 @@ struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
        if (own_req) {
                inet_csk_reqsk_queue_drop(sk, req);
                reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
-               inet_csk_reqsk_queue_add(sk, req, child);
-               /* Warning: caller must not call reqsk_put(req);
-                * child stole last reference on it.
-                */
-               return child;
+               if (inet_csk_reqsk_queue_add(sk, req, child))
+                       return child;
        }
        /* Too bad, another child took ownership of the request, undo. */
        bh_unlock_sock(child);
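
The last three hunks belong together: inet_csk_reqsk_queue_add() now returns the child it was asked to queue, or NULL after dropping it when the listener has already left TCP_LISTEN, and inet_csk_complete_hashdance() only hands the child back to its caller when the queueing actually succeeded, otherwise it falls through to the existing undo path. A minimal userspace sketch of that contract, with hypothetical names (struct child_sock, accept_queue_add), not the kernel code:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct child_sock {
	int fd;
	struct child_sock *next;
};

struct listener {
	int listening;                   /* cleared once the socket closes   */
	struct child_sock *accept_head;  /* completed connections for accept */
	pthread_mutex_t lock;
};

/* Hypothetical counterpart of inet_csk_reqsk_queue_add(): queue the child
 * and return it, or free it and return NULL when the listener is no longer
 * accepting, so the caller can tell the two outcomes apart. */
static struct child_sock *accept_queue_add(struct listener *lk,
					   struct child_sock *child)
{
	pthread_mutex_lock(&lk->lock);
	if (!lk->listening) {
		free(child);                 /* listener went away: drop it */
		child = NULL;
	} else {
		child->next = lk->accept_head;
		lk->accept_head = child;
	}
	pthread_mutex_unlock(&lk->lock);
	return child;
}

int main(void)
{
	struct listener lk = {
		.listening   = 1,
		.accept_head = NULL,
		.lock        = PTHREAD_MUTEX_INITIALIZER,
	};
	struct child_sock *child = calloc(1, sizeof(*child));

	if (!child)
		return 1;
	child->fd = 42;

	/* Mirrors inet_csk_complete_hashdance(): only hand the child out if
	 * it really made it onto the accept queue, otherwise undo. */
	if (accept_queue_add(&lk, child))
		printf("child %d queued for accept()\n", lk.accept_head->fd);
	else
		printf("listener closed, child dropped\n");
	return 0;
}

Returning the child (or NULL) lets the caller distinguish "queued, safe to hand to accept()" from "listener gone, connection must be torn down" without re-checking sk_state itself.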