/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}
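/* For reference: secure_tcp_sequence_number() (net/core/secure_seq.c)
 * derives the ISN from a keyed hash of the (saddr, daddr, sport, dport)
 * tuple plus a clock component, in the spirit of RFC 1948, so initial
 * sequence numbers are hard to predict yet still advance over time.
 */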
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only the timestamp cache is
	   held not per host, but per port pair, and the TW bucket is used as
	   state holder.

	   If the TW bucket has already been destroyed we fall back to VJ's
	   scheme and use the initial timestamp retrieved from the peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk, true);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	inet->inet_rcv_saddr = inet->inet_saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);

	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket. */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
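/* For orientation, a minimal (illustrative) userspace sequence that ends up
 * in tcp_v4_connect() via the connect(2) syscall and inet_stream_connect();
 * the address and port below are made up:
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in sin = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(80),
 *	};
 *	inet_pton(AF_INET, "192.0.2.1", &sin.sin_addr);
 *	connect(fd, (struct sockaddr *)&sin, sizeof(sin));
 */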
/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
static void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);
	u32 mtu = tcp_sk(sk)->mtu_info;

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always < 576 bytes, so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember the soft error
	 * for the case that this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *req;
	__u32 seq;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	if (icmp_skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			iph->saddr, th->source, inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	req = tp->fastopen_rsk;
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt) &&
	    (req == NULL || seq != tcp_rsk(req)->snt_isn)) {
		/* For a Fast Open socket, allow seq to be snt_isn. */
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff)
			break;

		/* XXX (TFO) - revisit the following logic for TFO */

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
			TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
		tcp_bound_rto(sk);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
				tcp_time_stamp - TCP_SKB_CB(skb)->when);

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	/* XXX (TFO) - if it's a TFO socket and has been accepted, rather
	 * than following the TCP_SYN_RECV case and closing the socket,
	 * we ignore the ICMP error and keep trying like a fully established
	 * socket. Is this the right thing to do?
	 */
	if (req && req->sk == NULL)
		goto out;

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		WARN_ON(req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen normally.
			       It can, f.e., if SYNs crossed,
			       or with Fast Open.
			     */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows treating only PROTO_UNREACH and
	 * PORT_UNREACH as hard errors (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note that in the modern internet, where routing is unreliable
	 * and broken firewalls sit in every dark corner sending random
	 * errors ordered by their masters, even these two messages have
	 * finally lost their original sense (even Linux sends invalid
	 * PORT_UNREACHs).
	 *
	 * Now we are in compliance with RFCs.
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
static void __tcp_v4_send_check(struct sk_buff *skb,
				__be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}
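/* In the CHECKSUM_PARTIAL case the device finishes the job: we seed
 * th->check with the complemented pseudo-header sum and record where the
 * final 16-bit checksum lives (csum_start/csum_offset) so the NIC can fill
 * it in. Otherwise we fold the full checksum over header plus payload in
 * software right here.
 */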
/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	return 0;
}
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So we build the reply based only on parameters
 *		arriving with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We are not losing security here:
		 * the incoming packet is checked against the md5 hash with
		 * the found key; no RST is generated if the md5 hash doesn't
		 * match.
		 */
		sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
					     &tcp_hashinfo, ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			return;
		rcu_read_lock();
		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto release_sk1;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					     &ip_hdr(skb)->saddr,
					     AF_INET) : NULL;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
	/* When socket is gone, all binding information is lost.
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
	 */
	if (sk)
		arg.bound_dev_if = sk->sk_bound_dev_if;

	net = dev_net(skb_dst(skb)->dev);
	arg.tos = ip_hdr(skb)->tos;
	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
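/* The seq/ack choice above follows RFC 793 reset generation: if the
 * offending segment carried an ACK, the RST uses seq = SEG.ACK with no ACK
 * bit; otherwise the RST keeps seq = 0 (from the memset) and acknowledges
 * SEG.SEQ plus the segment length, counting SYN and FIN as one octet each.
 */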
/* The code below, sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}
static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
			tcp_time_stamp,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct request_sock *req,
			      struct request_values *rvp,
			      u16 queue_mapping,
			      bool nocache)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, rvp, NULL);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
		if (!tcp_rsk(req)->snt_synack && !err)
			tcp_rsk(req)->snt_synack = tcp_time_stamp;
	}

	return err;
}
static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	int res = tcp_v4_send_synack(sk, NULL, req, rvp, 0, false);

	if (!res)
		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return res;
}
/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}
/*
 * Return true if a syncookie should be sent
 */
bool tcp_syn_flood_action(struct sock *sk,
			  const struct sk_buff *skb,
			  const char *proto)
{
	const char *msg = "Dropping request";
	bool want_cookie = false;
	struct listen_sock *lopt;

#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies) {
		msg = "Sending cookies";
		want_cookie = true;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
	} else
#endif
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);

	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
	if (!lopt->synflood_warned) {
		lopt->synflood_warned = 1;
		pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
			proto, ntohs(tcp_hdr(skb)->dest), msg);
	}
	return want_cookie;
}
EXPORT_SYMBOL(tcp_syn_flood_action);
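/* The caller (tcp_v4_conn_request()) treats a true return as permission to
 * answer with a SYN cookie instead of queueing a request_sock; a false
 * return under SYN-queue pressure means the SYN is simply dropped.
 */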
/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
{
	const struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options_rcu *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = sizeof(*dopt) + opt->optlen;

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(&dopt->opt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       sock_owned_by_user(sk) ||
				       lockdep_is_held(&sk->sk_lock.slock));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);
struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (hlist_empty(&md5sig->head))
		tcp_free_md5sig_pool();
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);
static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	if (!hlist_empty(&md5sig->head))
		tcp_free_md5sig_pool();
	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}
static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_key || !cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}
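/* Userspace reaches the parser above through the TCP_MD5SIG socket option,
 * roughly as follows (illustrative sketch, error handling omitted; peer
 * address and key are made up):
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	sin->sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.1", &sin->sin_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */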
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			const struct sock *sk, const struct request_sock *req,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
	} else if (req) {
		saddr = inet_rsk(req)->loc_addr;
		daddr = inet_rsk(req)->rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
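/* Per RFC 2385 the digest covers, in order: the IPv4 pseudo-header, the
 * TCP header with its checksum zeroed (options excluded), the segment
 * payload, and finally the key itself - which is what the four
 * tcp_*_hash_* calls above implement.
 */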
static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
				     : "");
		return true;
	}
	return false;
}

#endif
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
};
#endif
static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
			       struct request_sock *req,
			       struct tcp_fastopen_cookie *foc,
			       struct tcp_fastopen_cookie *valid_foc)
{
	bool skip_cookie = false;
	struct fastopen_queue *fastopenq;

	if (likely(!fastopen_cookie_present(foc))) {
		/* See include/net/tcp.h for the meaning of these knobs */
		if ((sysctl_tcp_fastopen & TFO_SERVER_ALWAYS) ||
		    ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD) &&
		    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1)))
			skip_cookie = true; /* no cookie to validate */
		else
			return false;
	}
	fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
	/* A FO option is present; bump the counter. */
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVE);

	/* Make sure the listener has enabled fastopen, and we don't
	 * exceed the max # of pending TFO requests allowed before trying
	 * to validate the cookie, in order to avoid burning CPU cycles
	 * unnecessarily.
	 *
	 * XXX (TFO) - The implication of checking the max_qlen before
	 * processing a cookie request is that clients can't differentiate
	 * between qlen overflow causing Fast Open to be disabled
	 * temporarily vs a server not supporting Fast Open at all.
	 */
	if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) == 0 ||
	    fastopenq == NULL || fastopenq->max_qlen == 0)
		return false;

	if (fastopenq->qlen >= fastopenq->max_qlen) {
		struct request_sock *req1;
		spin_lock(&fastopenq->lock);
		req1 = fastopenq->rskq_rst_head;
		if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
			spin_unlock(&fastopenq->lock);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
			/* Avoid bumping LINUX_MIB_TCPFASTOPENPASSIVEFAIL*/
			foc->len = -1;
			return false;
		}
		fastopenq->rskq_rst_head = req1->dl_next;
		fastopenq->qlen--;
		spin_unlock(&fastopenq->lock);
		reqsk_free(req1);
	}
	if (skip_cookie) {
		tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		return true;
	}
	if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) {
		if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) {
			tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
			if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) ||
			    memcmp(&foc->val[0], &valid_foc->val[0],
				   TCP_FASTOPEN_COOKIE_SIZE) != 0)
				return false;
			valid_foc->len = -1;
		}
		/* Acknowledge the data received from the peer. */
		tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		return true;
	} else if (foc->len == 0) { /* Client requesting a cookie */
		tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
		NET_INC_STATS_BH(sock_net(sk),
				 LINUX_MIB_TCPFASTOPENCOOKIEREQD);
	} else {
		/* Client sent a cookie with wrong size. Treat it
		 * the same as invalid and return a valid one.
		 */
		tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
	}
	return false;
}
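/* Summary of the return contract above: true means the SYN's payload may
 * be accepted immediately (cookie valid or not required); false means fall
 * back to a regular three-way handshake, with *valid_foc filled in when a
 * fresh cookie should be echoed back in the SYN-ACK.
 */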
static int tcp_v4_conn_req_fastopen(struct sock *sk,
				    struct sk_buff *skb,
				    struct sk_buff *skb_synack,
				    struct request_sock *req,
				    struct request_values *rvp)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct sock *child;
	int err;

	req->num_retrans = 0;
	req->num_timeout = 0;
	req->sk = NULL;

	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
	if (child == NULL) {
		NET_INC_STATS_BH(sock_net(sk),
				 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
		kfree_skb(skb_synack);
		return -1;
	}
	err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
				    ireq->rmt_addr, ireq->opt);
	err = net_xmit_eval(err);
	if (!err)
		tcp_rsk(req)->snt_synack = tcp_time_stamp;
	/* XXX (TFO) - is it ok to ignore error and continue? */

	spin_lock(&queue->fastopenq->lock);
	queue->fastopenq->qlen++;
	spin_unlock(&queue->fastopenq->lock);

	/* Initialize the child socket. Have to fix some values to take
	 * into account the child is a Fast Open socket and is created
	 * only out of the bits carried in the SYN packet.
	 */
	tp = tcp_sk(child);

	tp->fastopen_rsk = req;
	/* Do a hold on the listener sk so that if the listener is being
	 * closed, the child that has been accepted can live on and still
	 * access listen_lock.
	 */
	sock_hold(sk);
	tcp_rsk(req)->listener = sk;

	/* RFC1323: The window in SYN & SYN/ACK segments is never
	 * scaled. So correct it appropriately.
	 */
	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);

	/* Activate the retrans timer so that SYNACK can be retransmitted.
	 * The request socket is not added to the SYN table of the parent
	 * because it's been added to the accept queue directly.
	 */
	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT, TCP_RTO_MAX);

	/* Add the child socket directly into the accept queue */
	inet_csk_reqsk_queue_add(sk, req, child);

	/* Now finish processing the fastopen child socket. */
	inet_csk(child)->icsk_af_ops->rebuild_header(child);
	tcp_init_congestion_control(child);
	tcp_mtup_init(child);
	tcp_init_buffer_space(child);
	tcp_init_metrics(child);

	/* Queue the data carried in the SYN packet. We need to first
	 * bump skb's refcnt because the caller will attempt to free it.
	 *
	 * XXX (TFO) - we honor a zero-payload TFO request for now.
	 * (Any reason not to?)
	 */
	if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq + 1) {
		/* Don't queue the skb if there is no payload in SYN.
		 * XXX (TFO) - How about SYN+FIN?
		 */
		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
	} else {
		skb = skb_get(skb);
		skb_dst_drop(skb);
		__skb_pull(skb, tcp_hdr(skb)->doff * 4);
		skb_set_owner_r(skb, child);
		__skb_queue_tail(&child->sk_receive_queue, skb);
		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		tp->syn_data_acked = 1;
	}
	sk->sk_data_ready(sk, 0);
	bh_unlock_sock(child);
	sock_put(child);
	WARN_ON(req->sk == NULL);
	return 0;
}
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct request_sock *req;
	struct inet_request_sock *ireq;
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = NULL;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
	bool want_cookie = false;
	struct flowi4 fl4;
	struct tcp_fastopen_cookie foc = { .len = -1 };
	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
	struct sk_buff *skb_synack;
	int do_fastopen;

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and peer is
	 * evidently real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
		if (!want_cookie)
			goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * of warm entries in syn queue, drop request. It is better than
	 * clogging syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
		goto drop;
	}

	req = inet_reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
	tmp_opt.user_mss  = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0,
			  want_cookie ? NULL : &foc);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_release;

		/* Secret recipe starts with IP addresses */
		*mess++ ^= (__force u32)daddr;
		*mess++ ^= (__force u32)saddr;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

		want_cookie = false;	/* not our kind of cookie */
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_release;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->no_srccheck = inet_sk(sk)->transparent;
	ireq->opt = tcp_v4_save_options(skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, skb, sock_net(sk));

	if (want_cookie) {
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
		req->cookie_ts = tmp_opt.tstamp_ok;
	} else if (!isn) {
		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
		    fl4.daddr == saddr) {
			if (!tcp_peer_is_proven(req, dst, true)) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 !tcp_peer_is_proven(req, dst, false)) {
			/* Without syncookies the last quarter of the
			 * backlog is filled with destinations
			 * proven to be alive.
			 * It means that we continue to communicate
			 * with destinations already remembered
			 * at the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
				       &saddr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v4_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;

	if (dst == NULL) {
		dst = inet_csk_route_req(sk, &fl4, req);
		if (dst == NULL)
			goto drop_and_free;
	}
	do_fastopen = tcp_fastopen_check(sk, skb, req, &foc, &valid_foc);

	/* We don't call tcp_v4_send_synack() directly because we need
	 * to make sure a child socket can be created successfully before
	 * sending back synack!
	 *
	 * XXX (TFO) - Ideally one would simply call tcp_v4_send_synack()
	 * (or better yet, call tcp_send_synack() in the child context
	 * directly, but will have to fix bunch of other code first)
	 * after syn_recv_sock() except one will need to first fix the
	 * latter to remove its dependency on the current implementation
	 * of tcp_v4_send_synack()->tcp_select_initial_window().
	 */
	skb_synack = tcp_make_synack(sk, dst, req,
	    (struct request_values *)&tmp_ext,
	    fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);

	if (skb_synack) {
		__tcp_v4_send_check(skb_synack, ireq->loc_addr, ireq->rmt_addr);
		skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
	} else
		goto drop_and_free;

	if (likely(!do_fastopen)) {
		int err;
		err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
					    ireq->rmt_addr, ireq->opt);
		err = net_xmit_eval(err);
		if (err || want_cookie)
			goto drop_and_free;

		tcp_rsk(req)->snt_synack = tcp_time_stamp;
		tcp_rsk(req)->listener = NULL;
		/* Add the request_sock to the SYN table */
		inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
		if (fastopen_cookie_present(&foc) && foc.len != 0)
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
	} else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req,
					    (struct request_values *)&tmp_ext))
		goto drop_and_free;

	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);
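/* Rough passive-open pipeline for reference: tcp_v4_rcv() ->
 * tcp_v4_do_rcv() -> tcp_rcv_state_process() invokes conn_request() for a
 * SYN arriving on a LISTEN socket; the handshake-completing ACK is later
 * matched against the request in tcp_v4_hnd_req() and promoted to a full
 * socket by tcp_v4_syn_recv_sock() below.
 */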
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->inet_daddr   = ireq->rmt_addr;
	newinet->inet_rcv_saddr = ireq->loc_addr;
	newinet->inet_saddr   = ireq->loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);
	tcp_synack_rtt_meas(newsk, req);
	newtp->total_retrans = req->num_retrans;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	__inet_hash_nolisten(newsk, NULL);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev, false);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}
static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v4_check(skb->len, iph->saddr,
				  iph->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
				       skb->len, IPPROTO_TCP, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
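/* The 76 above is a heuristic threshold: short packets are cheap enough to
 * checksum immediately, while for larger ones verification is deferred so
 * it can be folded into the later copy to userspace (copy-and-checksum).
 */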
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
	 *  o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, 0) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
			rsk = sk;
			goto reset;
		}
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
void tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
			struct dst_entry *dst = sk->sk_rx_dst;

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
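/* Early demux runs before routing: if the segment belongs to an
 * established socket we can reuse the dst cached on that socket and skip
 * the per-packet route lookup entirely.
 */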
/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8)				  --ANK
 */
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return false;

	if (skb->len <= tcp_hdrlen(skb) &&
	    skb_queue_len(&tp->ucopy.prequeue) == 0)
		return false;

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return true;
}
EXPORT_SYMBOL(tcp_prequeue);
/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */
	if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when	 = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;

	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = net_dma_find_channel();
		if (tp->ucopy.dma_chan)
			ret = tcp_v4_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v4_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->saddr, th->source,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
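/* Delivery thus takes one of three paths: directly via tcp_v4_do_rcv()
 * when the socket is unowned, via the prequeue when a reader task is
 * waiting, or onto the backlog (drained later in release_sock()) when the
 * owner currently holds the socket.
 */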
static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	dst_hold(dst);
	sk->sk_rx_dst = dst;
	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);
const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};
EXPORT_SYMBOL(ipv4_specific);

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(tp->md5sig_info, rcu);
		tp->md5sig_info = NULL;
	}
#endif

#ifdef CONFIG_NET_DMA
	/* Cleans up our sk_async_wait_queue */
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	/* TCP Cookie Transactions */
	if (tp->cookie_values != NULL) {
		kref_put(&tp->cookie_values->kref,
			 tcp_cookie_values_release);
		tp->cookie_values = NULL;
	}
	BUG_ON(tp->fastopen_rsk != NULL);

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);

	sk_sockets_allocated_dec(sk);
	sock_release_memcg(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL :
		list_entry(head->first, struct inet_timewait_sock, tw_node);
}

static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
	return !is_a_nulls(tw->tw_node.next) ?
		hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}
/*
 * Get the next listener socket following cur.  If cur is NULL, get the
 * first socket starting from the bucket given in st->bucket; when
 * st->bucket is zero the very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_nulls_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_nulls_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}
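/*
 * A bucket is empty only when both its chain of established sockets
 * and its chain of TIME_WAIT sockets are empty; this lets the walkers
 * below skip it without taking the bucket lock.
 */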
static inline bool empty_bucket(struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
}
/*
 * Get the first established socket starting from the bucket given in
 * st->bucket.  If st->bucket is zero, the very first socket in the hash
 * is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		struct inet_timewait_sock *tw;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		st->state = TCP_SEQ_STATE_TIME_WAIT;
		inet_twsk_for_each(tw, node,
				   &tcp_hashinfo.ehash[st->bucket].twchain) {
			if (tw->tw_family != st->family ||
			    !net_eq(twsk_net(tw), net)) {
				continue;
			}
			rc = tw;
			goto out;
		}
		spin_unlock_bh(lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
	}
out:
	return rc;
}
static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct inet_timewait_sock *tw;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
		tw = cur;
		tw = tw_next(tw);
get_tw:
		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
			tw = tw_next(tw);
		}
		if (tw) {
			cur = tw;
			goto out;
		}
		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		st->state = TCP_SEQ_STATE_ESTABLISHED;

		/* Look for the next non-empty bucket */
		st->offset = 0;
		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
				empty_bucket(st))
			;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			goto end;

		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
	} else
		sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			goto found;
	}

	st->state = TCP_SEQ_STATE_TIME_WAIT;
	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
	goto get_tw;
found:
	cur = sk;
out:
	return cur;
end:
	cur = NULL;
	goto out;
}
static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}
static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc = established_get_idx(seq, pos);
	}

	return rc;
}
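/*
 * Resume the iteration at the position cached by the previous read.
 * This avoids walking all earlier entries again on every read() of a
 * large /proc/net/tcp; if resuming fails, the caller falls back to a
 * full lookup via tcp_get_idx().
 */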
static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}
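/*
 * seq_file ->start() callback: fast path when *pos matches the cached
 * st->last_pos, full rescan from index zero otherwise.
 */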
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}
static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}
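/*
 * seq_file ->stop() callback: drop whatever lock the iterator is still
 * holding for the current state (SYN-queue rwlock, listening-bucket or
 * established-bucket spinlock).
 */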
static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
		/* Fallthrough: the listening bucket lock is held too. */
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			  sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family = afinfo->family;
	s->last_pos = 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);
int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start	= tcp_seq_start;
	afinfo->seq_ops.next	= tcp_seq_next;
	afinfo->seq_ops.stop	= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(tcp_proc_unregister);
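/*
 * The three helpers below each format one row of /proc/net/tcp.  The
 * column layout (see the header emitted by tcp4_seq_show()) is: slot,
 * local address:port, remote address:port, state, tx_queue:rx_queue,
 * timer-active:expiry, retransmits, uid, timeout and inode, followed
 * by the reference count, the object pointer and, for full sockets,
 * a few extra timing and congestion fields.
 */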
static void get_openreq4(const struct sock *sk, const struct request_sock *req,
			 struct seq_file *f, int i, kuid_t uid, int *len)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
		i,
		ireq->loc_addr,
		ntohs(inet_sk(sk)->inet_sport),
		ireq->rmt_addr,
		ntohs(ireq->rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f), uid),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req,
		len);
}
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	if (sk->sk_state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/* Because we don't lock the socket, we might find a
		 * transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		sk->sk_state == TCP_LISTEN ?
		    (fastopenq ? fastopenq->max_qlen : 0) :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh),
		len);
}
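/*
 * TIME_WAIT sockets carry no queue or timer detail, so most columns
 * are printed as constants; only the addresses, the substate and the
 * remaining time-wait lifetime (tw_ttd - jiffies) vary.
 */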
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i, int *len)
{
	__be32 dest, src;
	__u16 destp, srcp;
	long delta = tw->tw_ttd - jiffies;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw, len);
}
#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	int len;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-*s\n", TMPSZ - 1,
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp4_sock(v, seq, st->num, &len);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait4_sock(v, seq, st->num, &len);
		break;
	}
	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
out:
	return 0;
}
static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};
static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
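/*
 * GRO receive: validate the TCP checksum (using the hardware-computed
 * CHECKSUM_COMPLETE value when available, computing it from the packet
 * otherwise) before handing the segment to the protocol-independent
 * aggregation code; packets that fail verification are flagged for a
 * flush so they bypass aggregation.
 */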
struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	const struct iphdr *iph = skb_gro_network_header(skb);
	__wsum wsum;
	__sum16 sum;

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}
flush:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;

	case CHECKSUM_NONE:
		wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
					  skb_gro_len(skb), IPPROTO_TCP, 0);
		sum = csum_fold(skb_checksum(skb,
					     skb_gro_offset(skb),
					     skb_gro_len(skb),
					     wsum));
		if (sum)
			goto flush;

		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	}

	return tcp_gro_receive(head, skb);
}
int tcp4_gro_complete(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}
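/*
 * The protocol operations vector registered with the socket layer for
 * IPPROTO_TCP over IPv4; every entry points at either the generic TCP
 * implementation or one of the IPv4-specific handlers above.
 */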
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.mtu_reduced		= tcp_v4_mtu_reduced,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.init_cgroup		= tcp_init_cgroup,
	.destroy_cgroup		= tcp_destroy_cgroup,
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};
EXPORT_SYMBOL(tcp_prot);
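/*
 * Per-network-namespace setup and teardown.  The only per-netns TCP
 * sysctl initialised here is tcp_ecn; its default of 2 means ECN is
 * enabled when an incoming connection requests it, but is not
 * requested on outgoing connections.
 */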
static int __net_init tcp_sk_init(struct net *net)
{
	net->ipv4.sysctl_tcp_ecn = 2;
	return 0;
}

static void __net_exit tcp_sk_exit(struct net *net)
{
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};
void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}