/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		  linux/ipv4/tcp_input.c
 *		  linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *	David S. Miller		:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *	David S. Miller		:	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *	Andi Kleen		:	Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *	Andi Kleen		:	Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *	Mike McLagan		:	Routing by source
 *	Juan Jose Ciarlante	:	ip_dynaddr bits
 *	Andi Kleen		:	various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

static __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);

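/* Worked example of the reuse arithmetic above (illustrative, not part of
 * the original source): if the TIME_WAIT socket left off at
 * tw_snd_nxt = 1000, the new incarnation starts at
 * write_seq = 1000 + 65535 + 2 = 66537, i.e. beyond anything the old
 * peer could still consider in-window, so delayed segments from the
 * previous connection cannot be confused with the new one.
 */
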
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	inet->inet_rcv_saddr = inet->inet_saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);

	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	inet_set_txhash(sk);

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket. */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);

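/* For orientation (illustrative, not part of the original file): a plain
 * userspace connect() on a TCP socket is what ultimately lands here, via
 * the protocol's connect hook (tcp_prot.connect, defined below):
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in sa = { .sin_family = AF_INET,
 *				  .sin_port = htons(80) };
 *	inet_pton(AF_INET, "192.0.2.1", &sa.sin_addr);
 *	connect(fd, (struct sockaddr *)&sa, sizeof(sa));
 *
 * The EINVAL/EAFNOSUPPORT checks at the top of tcp_v4_connect() are what
 * such a call trips over if the address length or family is wrong.
 */
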
/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);
	u32 mtu = tcp_sk(sk)->mtu_info;

	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);

static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */
void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	__u32 seq, snd_una;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			 iph->saddr, th->source, inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}
	switch (type) {
	case ICMP_REDIRECT:
		do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always <576bytes so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
					       TCP_TIMEOUT_INIT;
		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto -
			    min(icsk->icsk_rto,
				tcp_time_stamp - tcp_skb_timestamp(skb));

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}
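	/* Illustrative note (not from the original source) on the backoff
	 * revert above: with icsk_backoff at 3 and a valid srtt, the RTO is
	 * first recomputed from srtt and then re-scaled by the remaining
	 * backoff count via inet_csk_rto_backoff(), i.e. roughly rto << 2
	 * after the decrement. Whatever part of that interval has already
	 * elapsed since the head skb was (re)transmitted is subtracted, and
	 * only the remainder is armed; if nothing remains, we retransmit
	 * immediately.
	 */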
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		WARN_ON(req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
		goto out;
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * already accepted it is treated as a connected one below.
		 */
		if (fastopen && fastopen->sk == NULL)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);
			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows considering as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 */
	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);

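/* Illustrative sketch (not from the original source) of the pseudo-header
 * the checksum above covers, per RFC 793: source address, destination
 * address, a zero pad byte, the protocol number (6 for TCP) and the TCP
 * length, logically prepended to the TCP header and payload for summing.
 * This file's own tcp4_pseudohdr (used for MD5 below) has the same layout:
 *
 *	struct tcp4_pseudohdr {
 *		__be32	saddr;
 *		__be32	daddr;
 *		__u8	pad;
 *		__u8	protocol;
 *		__be16	len;
 *	};
 *
 * In the CHECKSUM_PARTIAL branch only the (inverted) pseudo-header sum is
 * stored in th->check; the device completes the rest.
 */
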
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So we build the reply based only on the parameters
 *		that arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk is not NULL, it means we did a successful lookup and the
	 * incoming route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we do not lose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(net,
					     &tcp_hashinfo, ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			return;
		rcu_read_lock();
		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto release_sk1;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					     &ip_hdr(skb)->saddr,
					     AF_INET) : NULL;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
	/* When socket is gone, all binding information is lost.
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
	 */
	if (sk)
		arg.bound_dev_if = sk->sk_bound_dev_if;

	arg.tos = ip_hdr(skb)->tos;
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}

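/* Worked example (illustrative, not from the original source) of the
 * rep.th.ack_seq computation above: for an incoming SYN with seq = 1000
 * and no payload, the RST must acknowledge
 * 1000 + 1 (syn) + 0 (fin) + 0 (payload) = 1001, i.e. exactly the
 * sequence space the offending segment consumed, so the peer accepts
 * the reset as valid.
 */
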
/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
			tcp_time_stamp,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      u16 queue_mapping,
			      struct tcp_fastopen_cookie *foc)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	return err;
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}

/*
 * Return true if a syncookie should be sent
 */
bool tcp_syn_flood_action(struct sock *sk,
			  const struct sk_buff *skb,
			  const char *proto)
{
	const char *msg = "Dropping request";
	bool want_cookie = false;
	struct listen_sock *lopt;

#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies) {
		msg = "Sending cookies";
		want_cookie = true;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
	} else
#endif
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);

	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
	if (!lopt->synflood_warned && sysctl_tcp_syncookies != 2) {
		lopt->synflood_warned = 1;
		pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
			proto, ntohs(tcp_hdr(skb)->dest), msg);
	}
	return want_cookie;
}
EXPORT_SYMBOL(tcp_syn_flood_action);

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       sock_owned_by_user(sk) ||
				       lockdep_is_held(&sk->sk_lock.slock));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);

struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_rsk(req)->ir_rmt_addr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}

/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);

static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}

static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}

static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			const struct sock *sk, const struct request_sock *req,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
	} else if (req) {
		saddr = inet_rsk(req)->ir_loc_addr;
		daddr = inet_rsk(req)->ir_rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

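/* Note (illustrative, not from the original source): per RFC 2385 the
 * digest computed above covers, in order, the IPv4 pseudo-header, the TCP
 * header with its checksum field zeroed, the segment payload, and finally
 * the connection key itself; tcp_md5_hash_header() handles zeroing the
 * checksum before hashing.
 */
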
static bool __tcp_v4_inbound_md5_hash(struct sock *sk,
				      const struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
					     : "");
		return true;
	}
	return false;
}

static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	bool ret;

	rcu_read_lock();
	ret = __tcp_v4_inbound_md5_hash(sk, skb);
	rcu_read_unlock();

	return ret;
}
#endif

static void tcp_v4_init_req(struct request_sock *req, struct sock *sk,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	ireq->ir_loc_addr = ip_hdr(skb)->daddr;
	ireq->ir_rmt_addr = ip_hdr(skb)->saddr;
	ireq->no_srccheck = inet_sk(sk)->transparent;
	ireq->opt = tcp_v4_save_options(skb);
}

static struct dst_entry *tcp_v4_route_req(struct sock *sk, struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);

	if (strict) {
		if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
			*strict = true;
		else
			*strict = false;
	}

	return dst;
}

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.mss_clamp	=	TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
#endif
	.init_req	=	tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
	.route_req	=	tcp_v4_route_req,
	.init_seq	=	tcp_v4_init_sequence,
	.send_synack	=	tcp_v4_send_synack,
	.queue_hash_add =	inet_csk_reqsk_queue_hash_add,
};

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);

drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);

/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->inet_daddr   = ireq->ir_rmt_addr;
	newinet->inet_rcv_saddr = ireq->ir_loc_addr;
	newinet->inet_saddr   = ireq->ir_loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	inet_set_txhash(newsk);
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	__inet_hash_nolisten(newsk, NULL);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev, false);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v4_check(sk, skb);
#endif
	return sk;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, 0) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(sk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);

void tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
			struct dst_entry *dst = sk->sk_rx_dst;

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}

/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8)				  --ANK
 */
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return false;

	if (skb->len <= tcp_hdrlen(skb) &&
	    skb_queue_len(&tp->ucopy.prequeue) == 0)
		return false;

	/* Before escaping RCU protected region, we need to take care of skb
	 * dst. Prequeue is only enabled for established sockets.
	 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst.
	 * Instead of doing full sk_rx_dst validity here, let's perform
	 * an optimistic check.
	 */
	if (likely(sk->sk_rx_dst))
		skb_dst_drop(skb);
	else
		skb_dst_force(skb);

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return true;
}
EXPORT_SYMBOL(tcp_prequeue);

/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */

	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
		goto csum_error;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	/* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
	 * barrier() makes sure compiler wont play fool^Waliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
		sizeof(struct inet_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;
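	/* Worked example (illustrative, not from the original source) of the
	 * end_seq computation above: a segment with seq = 5000, 100 bytes of
	 * payload, FIN set and a 20-byte header gives
	 * end_seq = 5000 + 0 (syn) + 1 (fin) + 120 (skb->len) - 20 = 5101,
	 * i.e. one past the last sequence number the segment occupies.
	 */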
	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * while:
	 * o We're expecting an MD5'd packet and this is no MD5 tcp option
	 * o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard_and_relse;
#endif

	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	sk_incoming_cpu_update(sk);
	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v4_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2)) {
		inet_twsk_put(inet_twsk(sk));
		goto bad_packet;
	}
	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->saddr, th->source,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};

void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst) {
		dst_hold(dst);
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	}
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);

const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
EXPORT_SYMBOL(ipv4_specific);

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}

void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(tp->md5sig_info, rcu);
		tp->md5sig_info = NULL;
	}
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	BUG_ON(tp->fastopen_rsk != NULL);

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);

	sk_sockets_allocated_dec(sk);
	sock_release_memcg(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

/*
 * Get next listener socket following cur.  If cur is NULL, get first socket
 * starting from bucket given in st->bucket; when st->bucket is zero the
 * very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_nulls_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_nulls_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}

static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline bool empty_bucket(const struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
}

/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		spin_unlock_bh(lock);
	}
out:
	return rc;
}

static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			return sk;
	}

	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
	++st->bucket;
	return established_get_first(seq);
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}

static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}

static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}

static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}

int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family		= afinfo->family;
	s->last_pos		= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);

int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(tcp_proc_unregister);

static void get_openreq4(const struct sock *sk, const struct request_sock *req,
			 struct seq_file *f, int i, kuid_t uid)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ntohs(inet_sk(sk)->inet_sport),
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f), uid),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req);
}

static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	if (sk->sk_state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		sk->sk_state == TCP_LISTEN ?
		    (fastopenq ? fastopenq->max_qlen : 0) :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}

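/* Illustrative sketch (hypothetical values, not from the original source):
 * the line emitted above for an established socket begins like
 *
 *	0: 0100007F:1F90 0100007F:C350 01 00000000:00000000 00:00000000 ...
 *
 * i.e. 127.0.0.1:8080 -> 127.0.0.1:50000 in state 01 (TCP_ESTABLISHED),
 * with addresses printed as host-order hex words and ports in hex.
 */
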
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	__be32 dest, src;
	__u16 destp, srcp;
	s32 delta = tw->tw_ttd - inet_tw_time_stamp();

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (sk->sk_state == TCP_TIME_WAIT)
			get_timewait4_sock(v, seq, st->num);
		else
			get_tcp4_sock(v, seq, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid);
		break;
	}
out:
	seq_pad(seq, '\n');
	return 0;
}

static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.init_cgroup		= tcp_init_cgroup,
	.destroy_cgroup		= tcp_destroy_cgroup,
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};
EXPORT_SYMBOL(tcp_prot);

static void __net_exit tcp_sk_exit(struct net *net)
{
	int cpu;

	for_each_possible_cpu(cpu)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
	free_percpu(net->ipv4.tcp_sk);
}

static int __net_init tcp_sk_init(struct net *net)
{
	int res, cpu;

	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
	if (!net->ipv4.tcp_sk)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct sock *sk;

		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
					   IPPROTO_TCP, net);
		if (res)
			goto fail;
		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
	}
	net->ipv4.sysctl_tcp_ecn = 2;
	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
	return 0;

fail:
	tcp_sk_exit(net);

	return res;
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
       .init	   = tcp_sk_init,
       .exit	   = tcp_sk_exit,
       .exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}