/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
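
/*
 * Pick the initial sequence number for a new connection from a keyed
 * hash of the 4-tuple plus a clocked component (see net/core/secure_seq.c),
 * so ISNs are hard to guess off-path yet still advance per 4-tuple.
 */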
static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only the timestamp cache is
	   held not per host, but per port pair and the TW bucket is used as
	   state holder.

	   If the TW bucket has already been destroyed we fall back to VJ's
	   scheme and use the initial timestamp retrieved from the peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
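
/*
 * TCP_REPAIR path: bring a socket straight into ESTABLISHED without
 * sending a SYN, used by checkpoint/restore style tools that re-create
 * a connection from previously saved state.
 */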
static int tcp_repair_connect(struct sock *sk)
{
	tcp_connect_init(sk);
	tcp_finish_connect(sk, NULL);

	return 0;
}

/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk, true);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	inet->inet_rcv_saddr = inet->inet_saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);

	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables
	 * and complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	if (likely(!tp->repair))
		err = tcp_connect(sk);
	else
		err = tcp_repair_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);

/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static void do_pmtu_discovery(struct sock *sk, const struct iphdr *iph, u32 mtu)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always <576bytes so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	/* We don't check in the destentry if pmtu discovery is forbidden
	 * on this route. We just assume that no "packet too big" ICMPs
	 * are sent back when pmtu discovery is not active.
	 * There is a small race when the user changes this flag in the
	 * route, but I think that's acceptable.
	 */
	dst = __sk_dst_check(sk, 0);
	if (dst == NULL)
		return;

	dst->ops->update_pmtu(dst, mtu);

	/* Something is about to be wrong... Remember the soft error
	 * for the case this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
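
/*
 * Summary of the fast path above: on an incoming FRAG_NEEDED ICMP we
 * shrink the path MTU cached in the route, resync the MSS via
 * tcp_sync_mss() and retransmit immediately instead of waiting for the
 * RTO, since the oversized packet was certainly dropped.
 */
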
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */
void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	__u32 seq;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	if (icmp_skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			 iph->saddr, th->source, inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			if (!sock_owned_by_user(sk))
				do_pmtu_discovery(sk, iph, info);
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
			TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
		tcp_bound_rto(sk);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
				tcp_time_stamp - TCP_SKB_CB(skb)->when);

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		WARN_ON(req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can occur e.g. if SYNs crossed.
			     */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows us to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in the modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit sending random
	 * errors ordered by their masters, even these two messages finally
	 * lose their original sense (even Linux sends invalid PORT_UNREACHs).
	 *
	 * Now we are in compliance with RFCs.
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
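
/*
 * Checksum helper: with CHECKSUM_PARTIAL only the pseudo-header sum is
 * stored in th->check, and csum_start/csum_offset tell the device (or
 * skb_checksum_help()) where to fold in the payload sum; otherwise the
 * complete checksum is computed in software right here.
 */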
static void __tcp_v4_send_check(struct sk_buff *skb,
				__be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);

int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	return 0;
}

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why do we NEVER use socket parameters (TOS, TTL etc.)
 *		      for a reset?
 *	Answer: if a packet caused an RST, it is not for a socket
 *		existing in our system; if it is matched to a socket,
 *		it is just a duplicate segment or a bug in the other side's
 *		TCP. So we build the reply based only on the parameters
 *		that arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */
static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We don't lose security here:
		 * the incoming packet is checked against the md5 hash of the
		 * found key; no RST is generated if the md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
					     &tcp_hashinfo, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			return;
		rcu_read_lock();
		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto release_sk1;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					     &ip_hdr(skb)->saddr,
					     AF_INET) : NULL;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
	/* When the socket is gone, all binding information is lost and
	 * routing might fail. Use the iif for the oif to make sure we
	 * can deliver it.
	 */
	arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb);

	net = dev_net(skb_dst(skb)->dev);
	arg.tos = ip_hdr(skb)->tos;
	ip_send_unicast_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}

/* The code following below, sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (ts) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tcp_time_stamp);
		rep.opt[2] = htonl(ts);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (ts) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	ip_send_unicast_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct request_sock *req,
			      struct request_values *rvp,
			      u16 queue_mapping,
			      bool nocache)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req, nocache)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, rvp);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	return err;
}

static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v4_send_synack(sk, NULL, req, rvp, 0, false);
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}
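
/*
 * Syncookies let a flooded listener drop the request_sock entirely and
 * encode the minimum connection state (a MAC over addresses and ports,
 * a coarse timestamp and an MSS table index) in the SYN-ACK's initial
 * sequence number; the final ACK is later validated by cookie_v4_check()
 * without any queued state. Options that don't fit in the cookie (e.g.
 * window scale, SACK) are lost unless they can be recovered from the
 * timestamp option.
 */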
/*
 * Return true if a syncookie should be sent
 */
bool tcp_syn_flood_action(struct sock *sk,
			  const struct sk_buff *skb,
			  const char *proto)
{
	const char *msg = "Dropping request";
	bool want_cookie = false;
	struct listen_sock *lopt;

#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies) {
		msg = "Sending cookies";
		want_cookie = true;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
	} else
#endif
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);

	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
	if (!lopt->synflood_warned) {
		lopt->synflood_warned = 1;
		pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
			proto, ntohs(tcp_hdr(skb)->dest), msg);
	}
	return want_cookie;
}
EXPORT_SYMBOL(tcp_syn_flood_action);

/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options_rcu *tcp_v4_save_options(struct sock *sk,
						  struct sk_buff *skb)
{
	const struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options_rcu *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = sizeof(*dopt) + opt->optlen;

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(&dopt->opt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *pos;
	unsigned int size = sizeof(struct in_addr);
	struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       sock_owned_by_user(sk) ||
				       lockdep_is_held(&sk->sk_lock.slock));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, pos, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);

struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}

/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (hlist_empty(&md5sig->head))
		tcp_free_md5sig_pool();
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);

void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *pos, *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	if (!hlist_empty(&md5sig->head))
		tcp_free_md5sig_pool();
	hlist_for_each_entry_safe(key, pos, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}

static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}
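
/*
 * Userspace reaches the parser above via setsockopt(TCP_MD5SIG). A
 * minimal illustrative sketch (error handling elided; "peer" and
 * "secret" are placeholders):
 *
 *	struct tcp_md5sig md5 = {};
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	sin->sin_family = AF_INET;
 *	sin->sin_addr = peer;			// peer IPv4 address
 *	md5.tcpm_keylen = strlen(secret);	// <= TCP_MD5SIG_MAXKEYLEN
 *	memcpy(md5.tcpm_key, secret, md5.tcpm_keylen);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * A zero tcpm_keylen deletes the key for that address instead.
 */
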
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			const struct sock *sk, const struct request_sock *req,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
	} else if (req) {
		saddr = inet_rsk(req)->loc_addr;
		daddr = inet_rsk(req)->rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
					     : "");
		return true;
	}
	return false;
}

#endif

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
};
#endif
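
/*
 * Passive open: handle an incoming SYN on a listening socket. The
 * function below allocates a request_sock, parses and echoes options,
 * chooses (or cookie-encodes) the ISN, replies with a SYN-ACK and
 * queues the request until the final ACK promotes it to a full socket
 * in tcp_v4_syn_recv_sock().
 */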
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct request_sock *req;
	struct inet_request_sock *ireq;
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = NULL;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
	bool want_cookie = false;

	/* Never answer SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and peer is
	 * evidently real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
		if (!want_cookie)
			goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * of warm entries in syn queue, drop request. It is better than
	 * clogging syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet_reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
	tmp_opt.user_mss  = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_release;

		/* Secret recipe starts with IP addresses */
		*mess++ ^= (__force u32)daddr;
		*mess++ ^= (__force u32)saddr;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

		want_cookie = false;	/* not our kind of cookie */
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_release;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->no_srccheck = inet_sk(sk)->transparent;
	ireq->opt = tcp_v4_save_options(sk, skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, skb);

	if (want_cookie) {
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
		req->cookie_ts = tmp_opt.tstamp_ok;
	} else if (!isn) {
		struct flowi4 fl4;

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, &fl4, req, want_cookie)) != NULL &&
		    fl4.daddr == saddr) {
			if (!tcp_peer_is_proven(req, dst, true)) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 !tcp_peer_is_proven(req, dst, false)) {
			/* Without syncookies the last quarter of the
			 * backlog is filled with destinations
			 * proven to be alive.
			 * It means that we continue to communicate
			 * with destinations already remembered
			 * at the moment of the synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
				       &saddr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v4_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;
	tcp_rsk(req)->snt_synack = tcp_time_stamp;

	if (tcp_v4_send_synack(sk, dst, req,
			       (struct request_values *)&tmp_ext,
			       skb_get_queue_mapping(skb),
			       want_cookie) ||
	    want_cookie)
		goto drop_and_free;

	inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);

/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->inet_daddr   = ireq->rmt_addr;
	newinet->inet_rcv_saddr = ireq->loc_addr;
	newinet->inet_saddr   = ireq->loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);
	if (tcp_rsk(req)->snt_synack)
		tcp_valid_rtt_meas(newsk,
		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
	newtp->total_retrans = req->retrans;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	__inet_hash_nolisten(newsk, NULL);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	tcp_clear_xmit_timers(newsk);
	tcp_cleanup_congestion_control(newsk);
	bh_unlock_sock(newsk);
	sock_put(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
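
/*
 * Demultiplex an incoming segment for a listening socket: it may match
 * a pending request_sock (handshake completion), an already established
 * child, or - with syncookies - a bare ACK that reconstructs state.
 */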
static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}

static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v4_check(skb->len, iph->saddr,
				  iph->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
				       skb->len, IPPROTO_TCP, 0);

	if (skb->len <= 76)
		return __skb_checksum_complete(skb);

	return 0;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible if:
	 *  o We're expecting an MD5'd packet and this has no MD5 tcp option
	 *  o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		sock_rps_save_rxhash(sk, skb);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
			rsk = sk;
			goto reset;
		}
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
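
/*
 * Early demux: called from the IP receive path before routing. If the
 * segment belongs to an established socket we can attach the socket and
 * its cached input route here, saving a full lookup further up the
 * stack for every packet of the flow.
 */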
void tcp_v4_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct net_device *dev;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, ip_hdrlen(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = (struct tcphdr *) ((char *)iph + ip_hdrlen(skb));

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	if (!pskb_may_pull(skb, ip_hdrlen(skb) + th->doff * 4))
		return;

	dev = skb->dev;
	sk = __inet_lookup_established(net, &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       dev->ifindex);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
			struct dst_entry *dst = sk->sk_rx_dst;
			if (dst)
				dst = dst_check(dst, 0);
			if (dst) {
				struct rtable *rt = (struct rtable *) dst;

				if (rt->rt_iif == dev->ifindex)
					skb_dst_set_noref(skb, dst);
			}
		}
	}
}
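
/*
 * Main IPv4 receive entry point, called from the IP layer for every
 * TCP segment addressed to this host.
 */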
int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */
	if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when	 = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;

	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = net_dma_find_channel();
		if (tp->ucopy.dma_chan)
			ret = tcp_v4_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v4_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};

const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};
EXPORT_SYMBOL(ipv4_specific);

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}

void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Cleanup up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(tp->md5sig_info, rcu);
		tp->md5sig_info = NULL;
	}
#endif

#ifdef CONFIG_NET_DMA
	/* Cleans up our sk_async_wait_queue */
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	/*
	 * If sendmsg cached page exists, toss it.
	 */
	if (sk->sk_sndmsg_page) {
		__free_page(sk->sk_sndmsg_page);
		sk->sk_sndmsg_page = NULL;
	}

	/* TCP Cookie Transactions */
	if (tp->cookie_values != NULL) {
		kref_put(&tp->cookie_values->kref,
			 tcp_cookie_values_release);
		tp->cookie_values = NULL;
	}

	sk_sockets_allocated_dec(sk);
	sock_release_memcg(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL :
		list_entry(head->first, struct inet_timewait_sock, tw_node);
}

static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
	return !is_a_nulls(tw->tw_node.next) ?
		hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}

/*
 * Get next listener socket following cur.  If cur is NULL, get the first
 * socket starting from the bucket given in st->bucket; when st->bucket is
 * zero the very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_nulls_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_nulls_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}

static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline bool empty_bucket(struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
}

/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		struct inet_timewait_sock *tw;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		st->state = TCP_SEQ_STATE_TIME_WAIT;
		inet_twsk_for_each(tw, node,
				   &tcp_hashinfo.ehash[st->bucket].twchain) {
			if (tw->tw_family != st->family ||
			    !net_eq(twsk_net(tw), net)) {
				continue;
			}
			rc = tw;
			goto out;
		}
		spin_unlock_bh(lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
	}
out:
	return rc;
}

static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct inet_timewait_sock *tw;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
		tw = cur;
		tw = tw_next(tw);
get_tw:
		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
			tw = tw_next(tw);
		}
		if (tw) {
			cur = tw;
			goto out;
		}
		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		st->state = TCP_SEQ_STATE_ESTABLISHED;

		/* Look for next non empty bucket */
		st->offset = 0;
		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
				empty_bucket(st))
			;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			return NULL;

		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
	} else
		sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			goto found;
	}

	st->state = TCP_SEQ_STATE_TIME_WAIT;
	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
	goto get_tw;
found:
	cur = sk;
out:
	return cur;
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}

static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}

static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}

static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}

int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family		= afinfo->family;
	s->last_pos		= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);

int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	proc_net_remove(net, afinfo->name);
}
EXPORT_SYMBOL(tcp_proc_unregister);
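
/*
 * The three helpers below each format one row of /proc/net/tcp, for an
 * open request, a full socket and a TIME_WAIT socket respectively; the
 * column layout must stay in sync with the header line printed in
 * tcp4_seq_show().
 */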
static void get_openreq4(const struct sock *sk, const struct request_sock *req,
			 struct seq_file *f, int i, int uid, int *len)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int ttd = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
		i,
		ireq->loc_addr,
		ntohs(inet_sk(sk)->inet_sport),
		ireq->rmt_addr,
		ntohs(ireq->rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_to_clock_t(ttd),
		req->retrans,
		uid,
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req,
		len);
}

static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	if (sk->sk_state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/*
		 * because we don't lock the socket, we might find a transient
		 * negative value
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		sock_i_uid(sk),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
		len);
}

static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i, int *len)
{
	__be32 dest, src;
	__u16 destp, srcp;
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw, len);
}

#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	int len;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-*s\n", TMPSZ - 1,
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp4_sock(v, seq, st->num, &len);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait4_sock(v, seq, st->num, &len);
		break;
	}
	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
out:
	return 0;
}

static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
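
/*
 * GRO hooks: validate (or defer) the checksum before segments are
 * aggregated, and rebuild a correct pseudo-header checksum plus GSO
 * type once a merged super-packet is handed back up.
 */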
struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	const struct iphdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

int tcp4_gro_complete(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}

struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
	.init_cgroup		= tcp_init_cgroup,
	.destroy_cgroup		= tcp_destroy_cgroup,
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};
EXPORT_SYMBOL(tcp_prot);
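
/*
 * Each network namespace gets its own kernel-internal control socket
 * (net->ipv4.tcp_sock); it is what ip_send_unicast_reply() uses above
 * to emit the RSTs and ACKs that have no full socket context.
 */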
static int __net_init tcp_sk_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv4.tcp_sock,
				    PF_INET, SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcp_sk_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv4.tcp_sock);
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}