inet_hashinfo: remove bsocket counter
[firefly-linux-kernel-4.4.55.git] net/ipv4/inet_connection_sock.c
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Support for INET connection oriented protocols.
 *
 * Authors:     See the TCP sources
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>
#include <net/tcp.h>

#ifdef INET_CSK_DEBUG
const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
EXPORT_SYMBOL(inet_csk_timer_bug_msg);
#endif

void inet_get_local_port_range(struct net *net, int *low, int *high)
{
        unsigned int seq;

        do {
                seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);

                *low = net->ipv4.ip_local_ports.range[0];
                *high = net->ipv4.ip_local_ports.range[1];
        } while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
}
EXPORT_SYMBOL(inet_get_local_port_range);
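
/* Illustrative counterpart (a sketch, not code from this file): the port
 * range read above is published under a seqlock, so the sysctl writer side
 * is assumed to look roughly like this, pairing with the retry loop above:
 *
 *      write_seqlock(&net->ipv4.ip_local_ports.lock);
 *      net->ipv4.ip_local_ports.range[0] = new_low;
 *      net->ipv4.ip_local_ports.range[1] = new_high;
 *      write_sequnlock(&net->ipv4.ip_local_ports.lock);
 *
 * A reader that races with the update loops until read_seqretry() reports
 * a stable snapshot, so *low and *high always form a consistent pair.
 */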

int inet_csk_bind_conflict(const struct sock *sk,
                           const struct inet_bind_bucket *tb, bool relax)
{
        struct sock *sk2;
        int reuse = sk->sk_reuse;
        int reuseport = sk->sk_reuseport;
        kuid_t uid = sock_i_uid((struct sock *)sk);

        /*
         * Unlike other sk lookup places we do not check
         * for sk_net here, since _all_ the socks listed
         * in tb->owners list belong to the same net - the
         * one this bucket belongs to.
         */

        sk_for_each_bound(sk2, &tb->owners) {
                if (sk != sk2 &&
                    !inet_v6_ipv6only(sk2) &&
                    (!sk->sk_bound_dev_if ||
                     !sk2->sk_bound_dev_if ||
                     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
                        if ((!reuse || !sk2->sk_reuse ||
                            sk2->sk_state == TCP_LISTEN) &&
                            (!reuseport || !sk2->sk_reuseport ||
                            (sk2->sk_state != TCP_TIME_WAIT &&
                             !uid_eq(uid, sock_i_uid(sk2))))) {

                                if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr ||
                                    sk2->sk_rcv_saddr == sk->sk_rcv_saddr)
                                        break;
                        }
                        if (!relax && reuse && sk2->sk_reuse &&
                            sk2->sk_state != TCP_LISTEN) {

                                if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr ||
                                    sk2->sk_rcv_saddr == sk->sk_rcv_saddr)
                                        break;
                        }
                }
        }
        return sk2 != NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);
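
/* Illustrative userspace sketch (hedged, not code from this file): the
 * conflict rules above are what give SO_REUSEADDR/SO_REUSEPORT their
 * visible semantics.  With the standard POSIX socket API, two listeners
 * may share a TCP port only when both set SO_REUSEPORT and were created
 * by the same effective UID (the uid_eq() test above):
 *
 *      int one = 1;
 *      int fd = socket(AF_INET, SOCK_STREAM, 0);
 *      setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *      struct sockaddr_in a = {
 *              .sin_family = AF_INET,
 *              .sin_port   = htons(8080),
 *      };
 *      bind(fd, (struct sockaddr *)&a, sizeof(a));  // ok alongside another
 *                                                   // SO_REUSEPORT listener
 */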

/* Obtain a reference to a local port for the given sock;
 * if snum is zero, select any available local port.
 */
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
        struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
        struct inet_bind_hashbucket *head;
        struct inet_bind_bucket *tb;
        int ret, attempts = 5;
        struct net *net = sock_net(sk);
        int smallest_size = -1, smallest_rover;
        kuid_t uid = sock_i_uid(sk);

        local_bh_disable();
        if (!snum) {
                int remaining, rover, low, high;

again:
                inet_get_local_port_range(net, &low, &high);
                remaining = (high - low) + 1;
                smallest_rover = rover = prandom_u32() % remaining + low;

                smallest_size = -1;
                do {
                        if (inet_is_local_reserved_port(net, rover))
                                goto next_nolock;
                        head = &hashinfo->bhash[inet_bhashfn(net, rover,
                                        hashinfo->bhash_size)];
                        spin_lock(&head->lock);
                        inet_bind_bucket_for_each(tb, &head->chain)
                                if (net_eq(ib_net(tb), net) && tb->port == rover) {
                                        if (((tb->fastreuse > 0 &&
                                              sk->sk_reuse &&
                                              sk->sk_state != TCP_LISTEN) ||
                                             (tb->fastreuseport > 0 &&
                                              sk->sk_reuseport &&
                                              uid_eq(tb->fastuid, uid))) &&
                                            (tb->num_owners < smallest_size || smallest_size == -1)) {
                                                smallest_size = tb->num_owners;
                                                smallest_rover = rover;
                                        }
                                        if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
                                                snum = rover;
                                                goto tb_found;
                                        }
                                        goto next;
                                }
                        break;
                next:
                        spin_unlock(&head->lock);
                next_nolock:
                        if (++rover > high)
                                rover = low;
                } while (--remaining > 0);

                /* Exhausted local port range during search?  It is not
                 * possible for us to be holding one of the bind hash
                 * locks if this test triggers, because if 'remaining'
                 * drops to zero, we broke out of the do/while loop at
                 * the top level, not from the 'break;' statement.
                 */
                ret = 1;
                if (remaining <= 0) {
                        if (smallest_size != -1) {
                                snum = smallest_rover;
                                goto have_snum;
                        }
                        goto fail;
                }
                /* OK, here is the one we will use.  HEAD is
                 * non-NULL and we hold its lock.
                 */
                snum = rover;
        } else {
have_snum:
                head = &hashinfo->bhash[inet_bhashfn(net, snum,
                                hashinfo->bhash_size)];
                spin_lock(&head->lock);
                inet_bind_bucket_for_each(tb, &head->chain)
                        if (net_eq(ib_net(tb), net) && tb->port == snum)
                                goto tb_found;
        }
        tb = NULL;
        goto tb_not_found;
tb_found:
        if (!hlist_empty(&tb->owners)) {
                if (sk->sk_reuse == SK_FORCE_REUSE)
                        goto success;

                if (((tb->fastreuse > 0 &&
                      sk->sk_reuse && sk->sk_state != TCP_LISTEN) ||
                     (tb->fastreuseport > 0 &&
                      sk->sk_reuseport && uid_eq(tb->fastuid, uid))) &&
                    smallest_size == -1) {
                        goto success;
                } else {
                        ret = 1;
                        if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true)) {
                                if (((sk->sk_reuse && sk->sk_state != TCP_LISTEN) ||
                                     (tb->fastreuseport > 0 &&
                                      sk->sk_reuseport && uid_eq(tb->fastuid, uid))) &&
                                    smallest_size != -1 && --attempts >= 0) {
                                        spin_unlock(&head->lock);
                                        goto again;
                                }

                                goto fail_unlock;
                        }
                }
        }
tb_not_found:
        ret = 1;
        if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep,
                                        net, head, snum)) == NULL)
                goto fail_unlock;
        if (hlist_empty(&tb->owners)) {
                if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
                        tb->fastreuse = 1;
                else
                        tb->fastreuse = 0;
                if (sk->sk_reuseport) {
                        tb->fastreuseport = 1;
                        tb->fastuid = uid;
                } else
                        tb->fastreuseport = 0;
        } else {
                if (tb->fastreuse &&
                    (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
                        tb->fastreuse = 0;
                if (tb->fastreuseport &&
                    (!sk->sk_reuseport || !uid_eq(tb->fastuid, uid)))
                        tb->fastreuseport = 0;
        }
success:
        if (!inet_csk(sk)->icsk_bind_hash)
                inet_bind_hash(sk, tb, snum);
        WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
        ret = 0;

fail_unlock:
        spin_unlock(&head->lock);
fail:
        local_bh_enable();
        return ret;
}
EXPORT_SYMBOL_GPL(inet_csk_get_port);
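
/* Illustrative userspace sketch (hedged, not code from this file): the
 * !snum search above is what runs when an application binds port 0 and
 * lets the kernel pick an ephemeral port:
 *
 *      int fd = socket(AF_INET, SOCK_STREAM, 0);
 *      struct sockaddr_in a = { .sin_family = AF_INET };  // sin_port == 0
 *      bind(fd, (struct sockaddr *)&a, sizeof(a));
 *
 *      struct sockaddr_in got;
 *      socklen_t len = sizeof(got);
 *      getsockname(fd, (struct sockaddr *)&got, &len);
 *      // ntohs(got.sin_port) now holds a port from ip_local_port_range
 */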

/*
 * Wait for an incoming connection and avoid race conditions. This must be
 * called with the socket locked.
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        DEFINE_WAIT(wait);
        int err;

        /*
         * True wake-one mechanism for incoming connections: only
         * one process gets woken up, not the 'whole herd'.
         * Since we do not 'race & poll' for established sockets
         * anymore, the common case will execute the loop only once.
         *
         * Subtle issue: "add_wait_queue_exclusive()" will be added
         * after any current non-exclusive waiters, and we know that
         * it will always _stay_ after any new non-exclusive waiters
         * because all non-exclusive waiters are added at the
         * beginning of the wait-queue. As such, it's ok to "drop"
         * our exclusiveness temporarily when we get woken up without
         * having to remove and re-insert us on the wait queue.
         */
        for (;;) {
                prepare_to_wait_exclusive(sk_sleep(sk), &wait,
                                          TASK_INTERRUPTIBLE);
                release_sock(sk);
                if (reqsk_queue_empty(&icsk->icsk_accept_queue))
                        timeo = schedule_timeout(timeo);
                sched_annotate_sleep();
                lock_sock(sk);
                err = 0;
                if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
                        break;
                err = -EINVAL;
                if (sk->sk_state != TCP_LISTEN)
                        break;
                err = sock_intr_errno(timeo);
                if (signal_pending(current))
                        break;
                err = -EAGAIN;
                if (!timeo)
                        break;
        }
        finish_wait(sk_sleep(sk), &wait);
        return err;
}

/*
 * This will accept the next outstanding connection.
 */
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct request_sock_queue *queue = &icsk->icsk_accept_queue;
        struct request_sock *req;
        struct sock *newsk;
        int error;

        lock_sock(sk);

        /* We need to make sure that this socket is listening,
         * and that it has something pending.
         */
        error = -EINVAL;
        if (sk->sk_state != TCP_LISTEN)
                goto out_err;

        /* Find already established connection */
        if (reqsk_queue_empty(queue)) {
                long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

                /* If this is a non-blocking socket, don't sleep */
                error = -EAGAIN;
                if (!timeo)
                        goto out_err;

                error = inet_csk_wait_for_connect(sk, timeo);
                if (error)
                        goto out_err;
        }
        req = reqsk_queue_remove(queue);
        newsk = req->sk;

        sk_acceptq_removed(sk);
        if (sk->sk_protocol == IPPROTO_TCP &&
            tcp_rsk(req)->tfo_listener &&
            queue->fastopenq) {
                spin_lock_bh(&queue->fastopenq->lock);
                if (tcp_rsk(req)->tfo_listener) {
                        /* We are still waiting for the final ACK from 3WHS
                         * so can't free req now. Instead, we set req->sk to
                         * NULL to signify that the child socket is taken
                         * so reqsk_fastopen_remove() will free the req
                         * when 3WHS finishes (or is aborted).
                         */
                        req->sk = NULL;
                        req = NULL;
                }
                spin_unlock_bh(&queue->fastopenq->lock);
        }
out:
        release_sock(sk);
        if (req)
                reqsk_put(req);
        return newsk;
out_err:
        newsk = NULL;
        req = NULL;
        *err = error;
        goto out;
}
EXPORT_SYMBOL(inet_csk_accept);
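
/* Illustrative userspace sketch (hedged, not code from this file): the
 * -EAGAIN path above is what a non-blocking caller observes when the
 * accept queue is empty:
 *
 *      int cfd = accept(lfd, NULL, NULL);      // lfd has O_NONBLOCK set
 *      if (cfd < 0 && errno == EAGAIN) {
 *              // nothing established yet; wait in poll() and retry
 *      }
 */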

/*
 * Using different timers for retransmit, delayed acks and probes.
 * We may wish to use just one timer maintaining a list of expire jiffies
 * to optimize.
 */
void inet_csk_init_xmit_timers(struct sock *sk,
                               void (*retransmit_handler)(unsigned long),
                               void (*delack_handler)(unsigned long),
                               void (*keepalive_handler)(unsigned long))
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        setup_timer(&icsk->icsk_retransmit_timer, retransmit_handler,
                        (unsigned long)sk);
        setup_timer(&icsk->icsk_delack_timer, delack_handler,
                        (unsigned long)sk);
        setup_timer(&sk->sk_timer, keepalive_handler, (unsigned long)sk);
        icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}
EXPORT_SYMBOL(inet_csk_init_xmit_timers);

void inet_csk_clear_xmit_timers(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;

        sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
        sk_stop_timer(sk, &icsk->icsk_delack_timer);
        sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_clear_xmit_timers);

void inet_csk_delete_keepalive_timer(struct sock *sk)
{
        sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);

void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
        sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}
EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);
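
/* Illustrative sketch (an assumption, not code from this file): the
 * keepalive timer managed above is normally armed on behalf of userspace
 * via SO_KEEPALIVE:
 *
 *      int one = 1;
 *      setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &one, sizeof(one));
 *
 * TCP is then expected to call inet_csk_reset_keepalive_timer() so probes
 * fire after the connection has been idle for tcp_keepalive_time seconds.
 */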

struct dst_entry *inet_csk_route_req(struct sock *sk,
                                     struct flowi4 *fl4,
                                     const struct request_sock *req)
{
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct net *net = read_pnet(&ireq->ireq_net);
        struct ip_options_rcu *opt = ireq->opt;
        struct rtable *rt;

        flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           sk->sk_protocol, inet_sk_flowi_flags(sk),
                           (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
                           ireq->ir_loc_addr, ireq->ir_rmt_port,
                           htons(ireq->ir_num));
        security_req_classify_flow(req, flowi4_to_flowi(fl4));
        rt = ip_route_output_flow(net, fl4, sk);
        if (IS_ERR(rt))
                goto no_route;
        if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
                goto route_err;
        return &rt->dst;

route_err:
        ip_rt_put(rt);
no_route:
        IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
        return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_req);

struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
                                            struct sock *newsk,
                                            const struct request_sock *req)
{
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct net *net = read_pnet(&ireq->ireq_net);
        struct inet_sock *newinet = inet_sk(newsk);
        struct ip_options_rcu *opt;
        struct flowi4 *fl4;
        struct rtable *rt;

        fl4 = &newinet->cork.fl.u.ip4;

        rcu_read_lock();
        opt = rcu_dereference(newinet->inet_opt);
        flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           sk->sk_protocol, inet_sk_flowi_flags(sk),
                           (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
                           ireq->ir_loc_addr, ireq->ir_rmt_port,
                           htons(ireq->ir_num));
        security_req_classify_flow(req, flowi4_to_flowi(fl4));
        rt = ip_route_output_flow(net, fl4, sk);
        if (IS_ERR(rt))
                goto no_route;
        if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
                goto route_err;
        rcu_read_unlock();
        return &rt->dst;

route_err:
        ip_rt_put(rt);
no_route:
        rcu_read_unlock();
        IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
        return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);

static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
                                 const u32 rnd, const u32 synq_hsize)
{
        return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1);
}
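
/* Worked example (illustrative): the "& (synq_hsize - 1)" above relies on
 * the SYN table size being a power of two (reqsk_queue_alloc() rounds
 * nr_table_entries up accordingly).  With synq_hsize == 512 the mask is
 * 0x1ff, so the jhash result is reduced to a slot in [0, 511] without a
 * division:
 *
 *      u32 slot = jhash_2words(raddr, rport, rnd) & (512 - 1);
 */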

#if IS_ENABLED(CONFIG_IPV6)
#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
#else
#define AF_INET_FAMILY(fam) true
#endif

/* Note: this is temporary:
 * the req sock will no longer be in the listener hash table.
 */
struct request_sock *inet_csk_search_req(struct sock *sk,
                                         const __be16 rport,
                                         const __be32 raddr,
                                         const __be32 laddr)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
        struct request_sock *req;
        u32 hash = inet_synq_hash(raddr, rport, lopt->hash_rnd,
                                  lopt->nr_table_entries);

        spin_lock(&icsk->icsk_accept_queue.syn_wait_lock);
        for (req = lopt->syn_table[hash]; req != NULL; req = req->dl_next) {
                const struct inet_request_sock *ireq = inet_rsk(req);

                if (ireq->ir_rmt_port == rport &&
                    ireq->ir_rmt_addr == raddr &&
                    ireq->ir_loc_addr == laddr &&
                    AF_INET_FAMILY(req->rsk_ops->family)) {
                        atomic_inc(&req->rsk_refcnt);
                        WARN_ON(req->sk);
                        break;
                }
        }
        spin_unlock(&icsk->icsk_accept_queue.syn_wait_lock);

        return req;
}
EXPORT_SYMBOL_GPL(inet_csk_search_req);

void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
                                   unsigned long timeout)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
        const u32 h = inet_synq_hash(inet_rsk(req)->ir_rmt_addr,
                                     inet_rsk(req)->ir_rmt_port,
                                     lopt->hash_rnd, lopt->nr_table_entries);

        reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
        inet_csk_reqsk_queue_added(sk, timeout);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);

/* The only thing we need from tcp.h */
extern int sysctl_tcp_synack_retries;


/* Decide when to expire the request and when to resend SYN-ACK */
static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
                                  const int max_retries,
                                  const u8 rskq_defer_accept,
                                  int *expire, int *resend)
{
        if (!rskq_defer_accept) {
                *expire = req->num_timeout >= thresh;
                *resend = 1;
                return;
        }
        *expire = req->num_timeout >= thresh &&
                  (!inet_rsk(req)->acked || req->num_timeout >= max_retries);
        /*
         * Do not resend while waiting for data after ACK,
         * but start resending at the end of the deferring period to give
         * a last chance for data or ACK to create an established socket.
         */
        *resend = !inet_rsk(req)->acked ||
                  req->num_timeout >= rskq_defer_accept - 1;
}
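
/* Illustrative userspace sketch (hedged, not code from this file):
 * rskq_defer_accept above is driven by TCP_DEFER_ACCEPT, which keeps a
 * connection unaccepted until data arrives or the deferral period lapses:
 *
 *      int secs = 5;   // the kernel converts this into a retransmit count
 *      setsockopt(lfd, IPPROTO_TCP, TCP_DEFER_ACCEPT, &secs, sizeof(secs));
 */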

int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req)
{
        int err = req->rsk_ops->rtx_syn_ack(parent, req);

        if (!err)
                req->num_retrans++;
        return err;
}
EXPORT_SYMBOL(inet_rtx_syn_ack);

/* return true if req was found in the syn_table[] */
static bool reqsk_queue_unlink(struct request_sock_queue *queue,
                               struct request_sock *req)
{
        struct listen_sock *lopt = queue->listen_opt;
        struct request_sock **prev;
        bool found = false;

        spin_lock(&queue->syn_wait_lock);

        for (prev = &lopt->syn_table[req->rsk_hash]; *prev != NULL;
             prev = &(*prev)->dl_next) {
                if (*prev == req) {
                        *prev = req->dl_next;
                        found = true;
                        break;
                }
        }

        spin_unlock(&queue->syn_wait_lock);
        if (del_timer(&req->rsk_timer))
                reqsk_put(req);
        return found;
}

void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
{
        if (reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req)) {
                reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
                reqsk_put(req);
        }
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);

static void reqsk_timer_handler(unsigned long data)
{
        struct request_sock *req = (struct request_sock *)data;
        struct sock *sk_listener = req->rsk_listener;
        struct inet_connection_sock *icsk = inet_csk(sk_listener);
        struct request_sock_queue *queue = &icsk->icsk_accept_queue;
        struct listen_sock *lopt = queue->listen_opt;
        int qlen, expire = 0, resend = 0;
        int max_retries, thresh;
        u8 defer_accept;

        if (sk_listener->sk_state != TCP_LISTEN || !lopt) {
                reqsk_put(req);
                return;
        }

        max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
        thresh = max_retries;
        /* Normally all the openreqs are young and become mature
         * (i.e. converted to established sockets) before the first timeout.
         * If the synack was not acknowledged for 1 second, it means
         * one of the following things: the synack was lost, the ack was
         * lost, the rtt is high or nobody planned to ack (i.e. synflood).
         * When the server is a bit loaded, the queue is populated with old
         * open requests, reducing the effective size of the queue.
         * When the server is well loaded, the queue size reduces to zero
         * after several minutes of work. That is not a synflood,
         * it is normal operation. The solution is pruning
         * too-old entries, overriding the normal timeout, when
         * the situation becomes dangerous.
         *
         * Essentially, we reserve half of the room for young
         * embryos; and abort old ones without pity, if old
         * ones are about to clog our table.
         */
        qlen = listen_sock_qlen(lopt);
        if (qlen >> (lopt->max_qlen_log - 1)) {
                int young = listen_sock_young(lopt) << 1;

                while (thresh > 2) {
                        if (qlen < young)
                                break;
                        thresh--;
                        young <<= 1;
                }
        }
        defer_accept = READ_ONCE(queue->rskq_defer_accept);
        if (defer_accept)
                max_retries = defer_accept;
        syn_ack_recalc(req, thresh, max_retries, defer_accept,
                       &expire, &resend);
        req->rsk_ops->syn_ack_timeout(req);
        if (!expire &&
            (!resend ||
             !inet_rtx_syn_ack(sk_listener, req) ||
             inet_rsk(req)->acked)) {
                unsigned long timeo;

                if (req->num_timeout++ == 0)
                        atomic_inc(&lopt->young_dec);
                timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
                mod_timer_pinned(&req->rsk_timer, jiffies + timeo);
                return;
        }
        inet_csk_reqsk_queue_drop(sk_listener, req);
        reqsk_put(req);
}
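
/* Worked example (illustrative numbers): with max_qlen_log == 10, the
 * pruning above engages once qlen reaches 512 (half of the 1024-slot
 * table).  Take qlen == 600, 100 young entries, and thresh starting at
 * the usual sysctl_tcp_synack_retries default of 5: young begins at 200
 * and doubles each round (200, 400, 800) while thresh drops (5, 4, 3)
 * until qlen < young, so mature requests here expire after only 3
 * timeouts instead of 5.
 */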

void reqsk_queue_hash_req(struct request_sock_queue *queue,
                          u32 hash, struct request_sock *req,
                          unsigned long timeout)
{
        struct listen_sock *lopt = queue->listen_opt;

        req->num_retrans = 0;
        req->num_timeout = 0;
        req->sk = NULL;

        /* before letting lookups find us, make sure all req fields
         * are committed to memory and refcnt initialized.
         */
        smp_wmb();
        atomic_set(&req->rsk_refcnt, 2);
        setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
        req->rsk_hash = hash;

        spin_lock(&queue->syn_wait_lock);
        req->dl_next = lopt->syn_table[hash];
        lopt->syn_table[hash] = req;
        spin_unlock(&queue->syn_wait_lock);

        mod_timer_pinned(&req->rsk_timer, jiffies + timeout);
}
EXPORT_SYMBOL(reqsk_queue_hash_req);

/**
 *      inet_csk_clone_lock - clone an inet socket, and lock its clone
 *      @sk: the socket to clone
 *      @req: request_sock
 *      @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 *      Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *inet_csk_clone_lock(const struct sock *sk,
                                 const struct request_sock *req,
                                 const gfp_t priority)
{
        struct sock *newsk = sk_clone_lock(sk, priority);

        if (newsk) {
                struct inet_connection_sock *newicsk = inet_csk(newsk);

                newsk->sk_state = TCP_SYN_RECV;
                newicsk->icsk_bind_hash = NULL;

                inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
                inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num;
                inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);
                newsk->sk_write_space = sk_stream_write_space;

                newsk->sk_mark = inet_rsk(req)->ir_mark;
                atomic64_set(&newsk->sk_cookie,
                             atomic64_read(&inet_rsk(req)->ir_cookie));

                newicsk->icsk_retransmits = 0;
                newicsk->icsk_backoff     = 0;
                newicsk->icsk_probes_out  = 0;

                /* Deinitialize accept_queue to trap illegal accesses. */
                memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));

                security_inet_csk_clone(newsk, req);
        }
        return newsk;
}
EXPORT_SYMBOL_GPL(inet_csk_clone_lock);

/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all.  Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void inet_csk_destroy_sock(struct sock *sk)
{
        WARN_ON(sk->sk_state != TCP_CLOSE);
        WARN_ON(!sock_flag(sk, SOCK_DEAD));

        /* It cannot be in a hash table! */
        WARN_ON(!sk_unhashed(sk));

        /* If inet_sk(sk)->inet_num is non-zero, the socket must be bound */
        WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);

        sk->sk_prot->destroy(sk);

        sk_stream_kill_queues(sk);

        xfrm_sk_free_policy(sk);

        sk_refcnt_debug_release(sk);

        percpu_counter_dec(sk->sk_prot->orphan_count);
        sock_put(sk);
}
EXPORT_SYMBOL(inet_csk_destroy_sock);

/* This function allows forcing the closure of a socket after the call to
 * tcp/dccp_create_openreq_child().
 */
void inet_csk_prepare_forced_close(struct sock *sk)
        __releases(&sk->sk_lock.slock)
{
        /* sk_clone_lock locked the socket and set refcnt to 2 */
        bh_unlock_sock(sk);
        sock_put(sk);

        /* The below has to be done to allow calling inet_csk_destroy_sock */
        sock_set_flag(sk, SOCK_DEAD);
        percpu_counter_inc(sk->sk_prot->orphan_count);
        inet_sk(sk)->inet_num = 0;
}
EXPORT_SYMBOL(inet_csk_prepare_forced_close);

int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
{
        struct inet_sock *inet = inet_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries);

        if (rc != 0)
                return rc;

        sk->sk_max_ack_backlog = 0;
        sk->sk_ack_backlog = 0;
        inet_csk_delack_init(sk);

        /* There is a race window here: we announce ourselves as listening,
         * but this transition has not yet been validated by get_port().
         * That is OK, because this socket enters the hash table only
         * after validation is complete.
         */
        sk->sk_state = TCP_LISTEN;
        if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
                inet->inet_sport = htons(inet->inet_num);

                sk_dst_reset(sk);
                sk->sk_prot->hash(sk);

                return 0;
        }

        sk->sk_state = TCP_CLOSE;
        __reqsk_queue_destroy(&icsk->icsk_accept_queue);
        return -EADDRINUSE;
}
EXPORT_SYMBOL_GPL(inet_csk_listen_start);
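
/* Illustrative userspace sketch (hedged, not code from this file): the
 * -EADDRINUSE branch above surfaces directly to listen(2) when the
 * re-validation in get_port() finds a conflicting bind:
 *
 *      if (listen(fd, 128) < 0 && errno == EADDRINUSE) {
 *              // another socket grabbed the port between bind() and
 *              // listen(); the socket was moved back to TCP_CLOSE
 *      }
 */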

/*
 *      This routine closes sockets which have been at least partially
 *      opened, but not yet accepted.
 */
void inet_csk_listen_stop(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct request_sock_queue *queue = &icsk->icsk_accept_queue;
        struct request_sock *acc_req;
        struct request_sock *req;

        /* make all the listen_opt local to us */
        acc_req = reqsk_queue_yank_acceptq(queue);

        /* Following the specs, it would be better either to send a FIN
         * (and enter FIN-WAIT-1, as in a normal close)
         * or to send an active reset (abort).
         * Certainly, it is pretty dangerous during a synflood, but that
         * is a bad justification for our negligence 8)
         * To be honest, we are not able to make either
         * of the variants now.                 --ANK
         */
        reqsk_queue_destroy(queue);

        while ((req = acc_req) != NULL) {
                struct sock *child = req->sk;

                acc_req = req->dl_next;

                local_bh_disable();
                bh_lock_sock(child);
                WARN_ON(sock_owned_by_user(child));
                sock_hold(child);

                sk->sk_prot->disconnect(child, O_NONBLOCK);

                sock_orphan(child);

                percpu_counter_inc(sk->sk_prot->orphan_count);

                if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
                        BUG_ON(tcp_sk(child)->fastopen_rsk != req);
                        BUG_ON(sk != req->rsk_listener);

                        /* Paranoid, to prevent race condition if
                         * an inbound pkt destined for child is
                         * blocked by sock lock in tcp_v4_rcv().
                         * Also to satisfy an assertion in
                         * tcp_v4_destroy_sock().
                         */
                        tcp_sk(child)->fastopen_rsk = NULL;
                }
                inet_csk_destroy_sock(child);

                bh_unlock_sock(child);
                local_bh_enable();
                sock_put(child);

                sk_acceptq_removed(sk);
                reqsk_put(req);
        }
        if (queue->fastopenq) {
                /* Free all the reqs queued in rskq_rst_head. */
                spin_lock_bh(&queue->fastopenq->lock);
                acc_req = queue->fastopenq->rskq_rst_head;
                queue->fastopenq->rskq_rst_head = NULL;
                spin_unlock_bh(&queue->fastopenq->lock);
                while ((req = acc_req) != NULL) {
                        acc_req = req->dl_next;
                        reqsk_put(req);
                }
        }
        WARN_ON(sk->sk_ack_backlog);
}
EXPORT_SYMBOL_GPL(inet_csk_listen_stop);

void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
        struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
        const struct inet_sock *inet = inet_sk(sk);

        sin->sin_family         = AF_INET;
        sin->sin_addr.s_addr    = inet->inet_daddr;
        sin->sin_port           = inet->inet_dport;
}
EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);

#ifdef CONFIG_COMPAT
int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
                               char __user *optval, int __user *optlen)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);

        if (icsk->icsk_af_ops->compat_getsockopt)
                return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
                                                            optval, optlen);
        return icsk->icsk_af_ops->getsockopt(sk, level, optname,
                                             optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);

int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
                               char __user *optval, unsigned int optlen)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);

        if (icsk->icsk_af_ops->compat_setsockopt)
                return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
                                                            optval, optlen);
        return icsk->icsk_af_ops->setsockopt(sk, level, optname,
                                             optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
#endif

static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
{
        const struct inet_sock *inet = inet_sk(sk);
        const struct ip_options_rcu *inet_opt;
        __be32 daddr = inet->inet_daddr;
        struct flowi4 *fl4;
        struct rtable *rt;

        rcu_read_lock();
        inet_opt = rcu_dereference(inet->inet_opt);
        if (inet_opt && inet_opt->opt.srr)
                daddr = inet_opt->opt.faddr;
        fl4 = &fl->u.ip4;
        rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
                                   inet->inet_saddr, inet->inet_dport,
                                   inet->inet_sport, sk->sk_protocol,
                                   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
        if (IS_ERR(rt))
                rt = NULL;
        if (rt)
                sk_setup_caps(sk, &rt->dst);
        rcu_read_unlock();

        /* rt may be NULL here; dst is the first member of struct rtable,
         * so &rt->dst then evaluates to NULL and callers see the failure.
         */
        return &rt->dst;
}

struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
{
        struct dst_entry *dst = __sk_dst_check(sk, 0);
        struct inet_sock *inet = inet_sk(sk);

        if (!dst) {
                dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
                if (!dst)
                        goto out;
        }
        dst->ops->update_pmtu(dst, sk, NULL, mtu);

        dst = __sk_dst_check(sk, 0);
        if (!dst)
                dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
out:
        return dst;
}
EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);