3 * Linux INET6 implementation
6 * Pedro Roque <roque@di.fc.ul.pt>
10 * linux/net/ipv4/tcp_input.c
11 * linux/net/ipv4/tcp_output.c
14 * Hideaki YOSHIFUJI : sin6_scope_id support
15 * YOSHIFUJI Hideaki @USAGI and:  Support IPV6_V6ONLY socket option, which
16 * Alexey Kuznetsov               allow both IPv4 and IPv6 sockets to bind
17 *                                a single port at the same time.
18 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
26 #include <linux/bottom_half.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/jiffies.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/init.h>
38 #include <linux/jhash.h>
39 #include <linux/ipsec.h>
40 #include <linux/times.h>
41 #include <linux/slab.h>
42 #include <linux/uaccess.h>
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/inet_common.h>
63 #include <net/secure_seq.h>
64 #include <net/tcp_memcontrol.h>
65 #include <net/busy_poll.h>
67 #include <linux/proc_fs.h>
68 #include <linux/seq_file.h>
70 #include <linux/crypto.h>
71 #include <linux/scatterlist.h>
73 static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
74 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
75 struct request_sock *req);
77 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
79 static const struct inet_connection_sock_af_ops ipv6_mapped;
80 static const struct inet_connection_sock_af_ops ipv6_specific;
81 #ifdef CONFIG_TCP_MD5SIG
82 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
83 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
85 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
86 const struct in6_addr *addr)
92 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
94 struct dst_entry *dst = skb_dst(skb);
97 const struct rt6_info *rt = (const struct rt6_info *)dst;
101 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
102 inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
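/* tcp_v6_init_sequence() below derives the initial sequence number for an
 * active open from the {source, destination} address/port tuple via
 * secure_tcpv6_sequence_number(), so off-path attackers cannot easily
 * predict ISNs.
 */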
106 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
108 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
109 ipv6_hdr(skb)->saddr.s6_addr32,
111 tcp_hdr(skb)->source);
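/* Active open (connect()) for an IPv6 TCP socket: validate the destination,
 * handle flow labels and scope ids, fall back to tcp_v4_connect() for
 * v4-mapped destinations, route the flow, pick a source address, and finally
 * start the handshake with tcp_connect().
 */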
114 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
117 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
118 struct inet_sock *inet = inet_sk(sk);
119 struct inet_connection_sock *icsk = inet_csk(sk);
120 struct ipv6_pinfo *np = inet6_sk(sk);
121 struct tcp_sock *tp = tcp_sk(sk);
122 struct in6_addr *saddr = NULL, *final_p, final;
124 struct dst_entry *dst;
128 if (addr_len < SIN6_LEN_RFC2133)
131 if (usin->sin6_family != AF_INET6)
132 return -EAFNOSUPPORT;
134 memset(&fl6, 0, sizeof(fl6));
137 fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
138 IP6_ECN_flow_init(fl6.flowlabel);
139 if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
140 struct ip6_flowlabel *flowlabel;
141 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
144 fl6_sock_release(flowlabel);
149 * connect() to INADDR_ANY means loopback (BSD'ism).
152 if (ipv6_addr_any(&usin->sin6_addr))
153 usin->sin6_addr.s6_addr[15] = 0x1;
155 addr_type = ipv6_addr_type(&usin->sin6_addr);
157 if (addr_type & IPV6_ADDR_MULTICAST)
160 if (addr_type&IPV6_ADDR_LINKLOCAL) {
161 if (addr_len >= sizeof(struct sockaddr_in6) &&
162 usin->sin6_scope_id) {
163 /* If interface is set while binding, indices must coincide. */
166 if (sk->sk_bound_dev_if &&
167 sk->sk_bound_dev_if != usin->sin6_scope_id)
170 sk->sk_bound_dev_if = usin->sin6_scope_id;
173 /* Connect to link-local address requires an interface */
174 if (!sk->sk_bound_dev_if)
178 if (tp->rx_opt.ts_recent_stamp &&
179 !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
180 tp->rx_opt.ts_recent = 0;
181 tp->rx_opt.ts_recent_stamp = 0;
185 sk->sk_v6_daddr = usin->sin6_addr;
186 np->flow_label = fl6.flowlabel;
192 if (addr_type == IPV6_ADDR_MAPPED) {
193 u32 exthdrlen = icsk->icsk_ext_hdr_len;
194 struct sockaddr_in sin;
196 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
198 if (__ipv6_only_sock(sk))
201 sin.sin_family = AF_INET;
202 sin.sin_port = usin->sin6_port;
203 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
205 icsk->icsk_af_ops = &ipv6_mapped;
206 sk->sk_backlog_rcv = tcp_v4_do_rcv;
207 #ifdef CONFIG_TCP_MD5SIG
208 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
211 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
214 icsk->icsk_ext_hdr_len = exthdrlen;
215 icsk->icsk_af_ops = &ipv6_specific;
216 sk->sk_backlog_rcv = tcp_v6_do_rcv;
217 #ifdef CONFIG_TCP_MD5SIG
218 tp->af_specific = &tcp_sock_ipv6_specific;
222 np->saddr = sk->sk_v6_rcv_saddr;
227 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
228 saddr = &sk->sk_v6_rcv_saddr;
230 fl6.flowi6_proto = IPPROTO_TCP;
231 fl6.daddr = sk->sk_v6_daddr;
232 fl6.saddr = saddr ? *saddr : np->saddr;
233 fl6.flowi6_oif = sk->sk_bound_dev_if;
234 fl6.flowi6_mark = sk->sk_mark;
235 fl6.fl6_dport = usin->sin6_port;
236 fl6.fl6_sport = inet->inet_sport;
238 final_p = fl6_update_dst(&fl6, np->opt, &final);
240 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
242 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
250 sk->sk_v6_rcv_saddr = *saddr;
253 /* set the source address */
255 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
257 sk->sk_gso_type = SKB_GSO_TCPV6;
258 __ip6_dst_store(sk, dst, NULL, NULL);
260 if (tcp_death_row.sysctl_tw_recycle &&
261 !tp->rx_opt.ts_recent_stamp &&
262 ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
263 tcp_fetch_timewait_stamp(sk, dst);
265 icsk->icsk_ext_hdr_len = 0;
267 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
270 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
272 inet->inet_dport = usin->sin6_port;
274 tcp_set_state(sk, TCP_SYN_SENT);
275 err = inet6_hash_connect(&tcp_death_row, sk);
281 if (!tp->write_seq && likely(!tp->repair))
282 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
283 sk->sk_v6_daddr.s6_addr32,
287 err = tcp_connect(sk);
294 tcp_set_state(sk, TCP_CLOSE);
297 inet->inet_dport = 0;
298 sk->sk_route_caps = 0;
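/* Deferred PMTU handler: called directly from tcp_v6_err() when the socket
 * is not owned by the user, or later via the TCP_MTU_REDUCED_DEFERRED flag.
 * Shrinks the MSS and retransmits if the cached path MTU is now too large.
 */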
302 static void tcp_v6_mtu_reduced(struct sock *sk)
304 struct dst_entry *dst;
306 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
309 dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
313 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
314 tcp_sync_mss(sk, dst_mtu(dst));
315 tcp_simple_retransmit(sk);
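/* ICMPv6 error handler for TCP: look up the socket that the quoted TCP
 * header belongs to and react to redirects, packet-too-big and hard errors,
 * mirroring tcp_v4_err() on the IPv4 side.
 */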
319 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
320 u8 type, u8 code, int offset, __be32 info)
322 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
323 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
324 struct net *net = dev_net(skb->dev);
325 struct request_sock *fastopen;
326 struct ipv6_pinfo *np;
332 sk = __inet6_lookup_established(net, &tcp_hashinfo,
333 &hdr->daddr, th->dest,
334 &hdr->saddr, ntohs(th->source),
338 ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
343 if (sk->sk_state == TCP_TIME_WAIT) {
344 inet_twsk_put(inet_twsk(sk));
347 seq = ntohl(th->seq);
348 if (sk->sk_state == TCP_NEW_SYN_RECV)
349 return tcp_req_err(sk, seq);
352 if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
353 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
355 if (sk->sk_state == TCP_CLOSE)
358 if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
359 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
364 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
365 fastopen = tp->fastopen_rsk;
366 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
367 if (sk->sk_state != TCP_LISTEN &&
368 !between(seq, snd_una, tp->snd_nxt)) {
369 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
375 if (type == NDISC_REDIRECT) {
376 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
379 dst->ops->redirect(dst, sk, skb);
383 if (type == ICMPV6_PKT_TOOBIG) {
384 /* We are not interested in TCP_LISTEN and open_requests
385 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
386 * they should go through unfragmented).
388 if (sk->sk_state == TCP_LISTEN)
391 if (!ip6_sk_accept_pmtu(sk))
394 tp->mtu_info = ntohl(info);
395 if (!sock_owned_by_user(sk))
396 tcp_v6_mtu_reduced(sk);
397 else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
403 icmpv6_err_convert(type, code, &err);
405 /* Might be for a request_sock */
406 switch (sk->sk_state) {
409 /* Only in fast or simultaneous open. If a fast open socket is
410 * already accepted it is treated as a connected one below.
412 if (fastopen && !fastopen->sk)
415 if (!sock_owned_by_user(sk)) {
417 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
421 sk->sk_err_soft = err;
425 if (!sock_owned_by_user(sk) && np->recverr) {
427 sk->sk_error_report(sk);
429 sk->sk_err_soft = err;
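/* Build and transmit a SYN-ACK for a request sock: route the reply if no
 * dst was supplied, fill in the checksum over the IPv6 pseudo-header and
 * send the segment with ip6_xmit().
 */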
437 static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
439 struct request_sock *req,
440 struct tcp_fastopen_cookie *foc,
443 struct inet_request_sock *ireq = inet_rsk(req);
444 struct ipv6_pinfo *np = inet6_sk(sk);
445 struct flowi6 *fl6 = &fl->u.ip6;
449 /* First, grab a route. */
450 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
451 IPPROTO_TCP)) == NULL)
454 skb = tcp_make_synack(sk, dst, req, foc, attach_req);
457 __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
458 &ireq->ir_v6_rmt_addr);
460 fl6->daddr = ireq->ir_v6_rmt_addr;
461 if (np->repflow && ireq->pktopts)
462 fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
464 err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
465 err = net_xmit_eval(err);
473 static void tcp_v6_reqsk_destructor(struct request_sock *req)
475 kfree_skb(inet_rsk(req)->pktopts);
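/* TCP-MD5 (RFC 2385) support: key lookup, the setsockopt() parser for
 * TCP_MD5SIG, and the helpers that hash the IPv6 pseudo-header plus the
 * TCP header and payload. Compiled in only with CONFIG_TCP_MD5SIG.
 */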
478 #ifdef CONFIG_TCP_MD5SIG
479 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
480 const struct in6_addr *addr)
482 return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
485 static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
486 const struct sock *addr_sk)
488 return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
491 static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
494 struct tcp_md5sig cmd;
495 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
497 if (optlen < sizeof(cmd))
500 if (copy_from_user(&cmd, optval, sizeof(cmd)))
503 if (sin6->sin6_family != AF_INET6)
506 if (!cmd.tcpm_keylen) {
507 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
508 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
510 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
514 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
517 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
518 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
519 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
521 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
522 AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
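/* Userspace installs or removes keys with setsockopt(TCP_MD5SIG). A minimal
 * sketch, not from this file, assuming <linux/tcp.h> is included, fd is a
 * connected IPv6 TCP socket and sin6 holds the peer address:
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	memcpy(&md5.tcpm_addr, &sin6, sizeof(sin6));	// peer to protect
 *	memcpy(md5.tcpm_key, "secret", 6);		// shared key
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */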
525 static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
526 const struct in6_addr *daddr,
527 const struct in6_addr *saddr, int nbytes)
529 struct tcp6_pseudohdr *bp;
530 struct scatterlist sg;
532 bp = &hp->md5_blk.ip6;
533 /* 1. TCP pseudo-header (RFC2460) */
536 bp->protocol = cpu_to_be32(IPPROTO_TCP);
537 bp->len = cpu_to_be32(nbytes);
539 sg_init_one(&sg, bp, sizeof(*bp));
540 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
543 static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
544 const struct in6_addr *daddr, struct in6_addr *saddr,
545 const struct tcphdr *th)
547 struct tcp_md5sig_pool *hp;
548 struct hash_desc *desc;
550 hp = tcp_get_md5sig_pool();
552 goto clear_hash_noput;
553 desc = &hp->md5_desc;
555 if (crypto_hash_init(desc))
557 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
559 if (tcp_md5_hash_header(hp, th))
561 if (tcp_md5_hash_key(hp, key))
563 if (crypto_hash_final(desc, md5_hash))
566 tcp_put_md5sig_pool();
570 tcp_put_md5sig_pool();
572 memset(md5_hash, 0, 16);
576 static int tcp_v6_md5_hash_skb(char *md5_hash,
577 const struct tcp_md5sig_key *key,
578 const struct sock *sk,
579 const struct sk_buff *skb)
581 const struct in6_addr *saddr, *daddr;
582 struct tcp_md5sig_pool *hp;
583 struct hash_desc *desc;
584 const struct tcphdr *th = tcp_hdr(skb);
586 if (sk) { /* valid for establish/request sockets */
587 saddr = &sk->sk_v6_rcv_saddr;
588 daddr = &sk->sk_v6_daddr;
590 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
591 saddr = &ip6h->saddr;
592 daddr = &ip6h->daddr;
595 hp = tcp_get_md5sig_pool();
597 goto clear_hash_noput;
598 desc = &hp->md5_desc;
600 if (crypto_hash_init(desc))
603 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
605 if (tcp_md5_hash_header(hp, th))
607 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
609 if (tcp_md5_hash_key(hp, key))
611 if (crypto_hash_final(desc, md5_hash))
614 tcp_put_md5sig_pool();
618 tcp_put_md5sig_pool();
620 memset(md5_hash, 0, 16);
626 static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
627 const struct sk_buff *skb)
629 #ifdef CONFIG_TCP_MD5SIG
630 const __u8 *hash_location = NULL;
631 struct tcp_md5sig_key *hash_expected;
632 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
633 const struct tcphdr *th = tcp_hdr(skb);
637 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
638 hash_location = tcp_parse_md5sig_option(th);
640 /* We've parsed the options - do we have a hash? */
641 if (!hash_expected && !hash_location)
644 if (hash_expected && !hash_location) {
645 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
649 if (!hash_expected && hash_location) {
650 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
654 /* check the signature */
655 genhash = tcp_v6_md5_hash_skb(newhash,
659 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
660 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
661 genhash ? "failed" : "mismatch",
662 &ip6h->saddr, ntohs(th->source),
663 &ip6h->daddr, ntohs(th->dest));
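/* Request-sock initialisation for incoming SYNs: record the peer and local
 * addresses, pin the interface for link-local peers, and stash the SYN's
 * IPv6 options (pktopts) if the listener asked for any of them.
 */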
670 static void tcp_v6_init_req(struct request_sock *req,
671 const struct sock *sk_listener,
674 struct inet_request_sock *ireq = inet_rsk(req);
675 const struct ipv6_pinfo *np = inet6_sk(sk_listener);
677 ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
678 ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
680 /* So that link locals have meaning */
681 if (!sk_listener->sk_bound_dev_if &&
682 ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
683 ireq->ir_iif = tcp_v6_iif(skb);
685 if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
686 (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
687 np->rxopt.bits.rxinfo ||
688 np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
689 np->rxopt.bits.rxohlim || np->repflow)) {
690 atomic_inc(&skb->users);
695 static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
697 const struct request_sock *req,
702 return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
705 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
707 .obj_size = sizeof(struct tcp6_request_sock),
708 .rtx_syn_ack = tcp_rtx_synack,
709 .send_ack = tcp_v6_reqsk_send_ack,
710 .destructor = tcp_v6_reqsk_destructor,
711 .send_reset = tcp_v6_send_reset,
712 .syn_ack_timeout = tcp_syn_ack_timeout,
715 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
716 .mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
717 sizeof(struct ipv6hdr),
718 #ifdef CONFIG_TCP_MD5SIG
719 .req_md5_lookup = tcp_v6_md5_lookup,
720 .calc_md5_hash = tcp_v6_md5_hash_skb,
722 .init_req = tcp_v6_init_req,
723 #ifdef CONFIG_SYN_COOKIES
724 .cookie_init_seq = cookie_v6_init_sequence,
726 .route_req = tcp_v6_route_req,
727 .init_seq = tcp_v6_init_sequence,
728 .send_synack = tcp_v6_send_synack,
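/* tcp_v6_send_response() builds a bare reply segment (RST or ACK) on the
 * per-netns control socket, optionally carrying timestamp and MD5 options,
 * with addresses and ports swapped from the packet that triggered it.
 */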
731 static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
732 u32 ack, u32 win, u32 tsval, u32 tsecr,
733 int oif, struct tcp_md5sig_key *key, int rst,
734 u8 tclass, u32 label)
736 const struct tcphdr *th = tcp_hdr(skb);
738 struct sk_buff *buff;
740 struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
741 struct sock *ctl_sk = net->ipv6.tcp_sk;
742 unsigned int tot_len = sizeof(struct tcphdr);
743 struct dst_entry *dst;
747 tot_len += TCPOLEN_TSTAMP_ALIGNED;
748 #ifdef CONFIG_TCP_MD5SIG
750 tot_len += TCPOLEN_MD5SIG_ALIGNED;
753 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
758 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
760 t1 = (struct tcphdr *) skb_push(buff, tot_len);
761 skb_reset_transport_header(buff);
763 /* Swap the send and the receive. */
764 memset(t1, 0, sizeof(*t1));
765 t1->dest = th->source;
766 t1->source = th->dest;
767 t1->doff = tot_len / 4;
768 t1->seq = htonl(seq);
769 t1->ack_seq = htonl(ack);
770 t1->ack = !rst || !th->ack;
772 t1->window = htons(win);
774 topt = (__be32 *)(t1 + 1);
777 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
778 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
779 *topt++ = htonl(tsval);
780 *topt++ = htonl(tsecr);
783 #ifdef CONFIG_TCP_MD5SIG
785 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
786 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
787 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
788 &ipv6_hdr(skb)->saddr,
789 &ipv6_hdr(skb)->daddr, t1);
793 memset(&fl6, 0, sizeof(fl6));
794 fl6.daddr = ipv6_hdr(skb)->saddr;
795 fl6.saddr = ipv6_hdr(skb)->daddr;
796 fl6.flowlabel = label;
798 buff->ip_summed = CHECKSUM_PARTIAL;
801 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
803 fl6.flowi6_proto = IPPROTO_TCP;
804 if (rt6_need_strict(&fl6.daddr) && !oif)
805 fl6.flowi6_oif = tcp_v6_iif(skb);
807 fl6.flowi6_oif = oif;
808 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
809 fl6.fl6_dport = t1->dest;
810 fl6.fl6_sport = t1->source;
811 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
813 /* Pass a socket to ip6_dst_lookup whether the reply is a RST or an ACK;
814 * the underlying function will use it to retrieve the network namespace.
817 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
819 skb_dst_set(buff, dst);
820 ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
821 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
823 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
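/* Send a RST in reply to a bogus segment. When no socket matched, any MD5
 * key has to be recovered through a listener lookup before the reset may be
 * generated.
 */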
830 static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
832 const struct tcphdr *th = tcp_hdr(skb);
833 u32 seq = 0, ack_seq = 0;
834 struct tcp_md5sig_key *key = NULL;
835 #ifdef CONFIG_TCP_MD5SIG
836 const __u8 *hash_location = NULL;
837 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
838 unsigned char newhash[16];
840 struct sock *sk1 = NULL;
847 /* If sk is not NULL, it means we did a successful lookup and the incoming
848 * route had to be correct. prequeue might have dropped our dst.
850 if (!sk && !ipv6_unicast_destination(skb))
853 #ifdef CONFIG_TCP_MD5SIG
854 hash_location = tcp_parse_md5sig_option(th);
855 if (!sk && hash_location) {
857 * active side is lost. Try to find the listening socket through the
858 * source port, and then find the md5 key through the listening socket.
859 * We are not loosening security here:
860 * the incoming packet is checked against the md5 hash of the key we find,
861 * and no RST is generated if the md5 hash doesn't match.
863 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
864 &tcp_hashinfo, &ipv6h->saddr,
865 th->source, &ipv6h->daddr,
866 ntohs(th->source), tcp_v6_iif(skb));
871 key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
875 genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
876 if (genhash || memcmp(hash_location, newhash, 16) != 0)
879 key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
884 seq = ntohl(th->ack_seq);
886 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
889 oif = sk ? sk->sk_bound_dev_if : 0;
890 tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
892 #ifdef CONFIG_TCP_MD5SIG
901 static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
902 u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
903 struct tcp_md5sig_key *key, u8 tclass,
906 tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
910 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
912 struct inet_timewait_sock *tw = inet_twsk(sk);
913 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
915 tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
916 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
917 tcp_time_stamp + tcptw->tw_ts_offset,
918 tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
919 tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
924 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
925 struct request_sock *req)
927 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
928 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
930 tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
931 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
932 tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
933 tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
934 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
939 static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
941 #ifdef CONFIG_SYN_COOKIES
942 const struct tcphdr *th = tcp_hdr(skb);
945 sk = cookie_v6_check(sk, skb);
950 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
952 if (skb->protocol == htons(ETH_P_IP))
953 return tcp_v4_conn_request(sk, skb);
955 if (!ipv6_unicast_destination(skb))
958 return tcp_conn_request(&tcp6_request_sock_ops,
959 &tcp_request_sock_ipv6_ops, sk, skb);
962 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
963 return 0; /* don't send reset */
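/* Create the child socket once the three-way handshake completes. The mapped
 * branch hands v4-on-v6 sockets to tcp_v4_syn_recv_sock() and then repairs
 * the IPv6-specific pointers; the native branch routes, allocates the child
 * with tcp_create_openreq_child() and copies addresses, options and MD5 keys.
 */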
966 static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
967 struct request_sock *req,
968 struct dst_entry *dst,
969 struct request_sock *req_unhash,
972 struct inet_request_sock *ireq;
973 struct ipv6_pinfo *newnp;
974 const struct ipv6_pinfo *np = inet6_sk(sk);
975 struct tcp6_sock *newtcp6sk;
976 struct inet_sock *newinet;
977 struct tcp_sock *newtp;
979 #ifdef CONFIG_TCP_MD5SIG
980 struct tcp_md5sig_key *key;
984 if (skb->protocol == htons(ETH_P_IP)) {
989 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
990 req_unhash, own_req);
995 newtcp6sk = (struct tcp6_sock *)newsk;
996 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
998 newinet = inet_sk(newsk);
999 newnp = inet6_sk(newsk);
1000 newtp = tcp_sk(newsk);
1002 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1004 newnp->saddr = newsk->sk_v6_rcv_saddr;
1006 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1007 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1008 #ifdef CONFIG_TCP_MD5SIG
1009 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1012 newnp->ipv6_ac_list = NULL;
1013 newnp->ipv6_fl_list = NULL;
1014 newnp->pktoptions = NULL;
1016 newnp->mcast_oif = tcp_v6_iif(skb);
1017 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1018 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1020 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1023 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1024 * here, tcp_create_openreq_child now does this for us, see the comment in
1025 * that function for the gory details. -acme
1028 /* This is a tricky place. Until this moment the IPv4 tcp code
1029 worked with the IPv6 icsk.icsk_af_ops. Sync it now.
1032 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1037 ireq = inet_rsk(req);
1039 if (sk_acceptq_is_full(sk))
1043 dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
1048 newsk = tcp_create_openreq_child(sk, req, skb);
1053 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1054 * count here, tcp_create_openreq_child now does this for us, see the
1055 * comment in that function for the gory details. -acme
1058 newsk->sk_gso_type = SKB_GSO_TCPV6;
1059 __ip6_dst_store(newsk, dst, NULL, NULL);
1060 inet6_sk_rx_dst_set(newsk, skb);
1062 newtcp6sk = (struct tcp6_sock *)newsk;
1063 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1065 newtp = tcp_sk(newsk);
1066 newinet = inet_sk(newsk);
1067 newnp = inet6_sk(newsk);
1069 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1071 newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1072 newnp->saddr = ireq->ir_v6_loc_addr;
1073 newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1074 newsk->sk_bound_dev_if = ireq->ir_iif;
1076 /* Now IPv6 options...
1078 First: no IPv4 options.
1080 newinet->inet_opt = NULL;
1081 newnp->ipv6_ac_list = NULL;
1082 newnp->ipv6_fl_list = NULL;
1085 newnp->rxopt.all = np->rxopt.all;
1087 newnp->pktoptions = NULL;
1089 newnp->mcast_oif = tcp_v6_iif(skb);
1090 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1091 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1093 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1095 /* Clone native IPv6 options from listening socket (if any)
1097 Yes, keeping a reference count would be much more clever,
1098 but we do one more thing here: reattach optmem to newsk.
1102 newnp->opt = ipv6_dup_options(newsk, np->opt);
1104 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1106 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1107 newnp->opt->opt_flen);
1109 tcp_ca_openreq_child(newsk, dst);
1111 tcp_sync_mss(newsk, dst_mtu(dst));
1112 newtp->advmss = dst_metric_advmss(dst);
1113 if (tcp_sk(sk)->rx_opt.user_mss &&
1114 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1115 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1117 tcp_initialize_rcv_mss(newsk);
1119 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1120 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1122 #ifdef CONFIG_TCP_MD5SIG
1123 /* Copy over the MD5 key from the original socket */
1124 key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
1126 /* We're using one, so create a matching key
1127 * on the newsk structure. If we fail to get
1128 * memory, then we end up not copying the key across.
1131 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
1132 AF_INET6, key->key, key->keylen,
1133 sk_gfp_atomic(sk, GFP_ATOMIC));
1137 if (__inet_inherit_port(sk, newsk) < 0) {
1138 inet_csk_prepare_forced_close(newsk);
1142 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1144 tcp_move_syn(newtp, req);
1146 /* Clone pktoptions received with SYN, if we own the req */
1147 if (ireq->pktopts) {
1148 newnp->pktoptions = skb_clone(ireq->pktopts,
1149 sk_gfp_atomic(sk, GFP_ATOMIC));
1150 consume_skb(ireq->pktopts);
1151 ireq->pktopts = NULL;
1152 if (newnp->pktoptions)
1153 skb_set_owner_r(newnp->pktoptions, newsk);
1160 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1164 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1168 /* The socket must have its spinlock held when we get
1169 * here, unless it is a TCP_LISTEN socket.
1171 * We have a potential double-lock case here, so even when
1172 * doing backlog processing we use the BH locking scheme.
1173 * This is because we cannot sleep with the original spinlock held.
1176 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1178 struct ipv6_pinfo *np = inet6_sk(sk);
1179 struct tcp_sock *tp;
1180 struct sk_buff *opt_skb = NULL;
1182 /* Imagine: the socket is IPv6. An IPv4 packet arrives,
1183 goes to the IPv4 receive handler and is backlogged.
1184 From the backlog it always goes here. Kerboom...
1185 Fortunately, tcp_rcv_established and rcv_established
1186 handle them correctly, but that is not the case with
1187 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1190 if (skb->protocol == htons(ETH_P_IP))
1191 return tcp_v4_do_rcv(sk, skb);
1193 if (sk_filter(sk, skb))
1197 * socket locking is here for SMP purposes as backlog rcv
1198 * is currently called with bh processing disabled.
1201 /* Do Stevens' IPV6_PKTOPTIONS.
1203 Yes, guys, it is the only place in our code where we
1204 may handle it without affecting IPv4.
1205 The rest of the code is protocol independent,
1206 and I do not like the idea of uglifying IPv4.
1208 Actually, the whole idea behind IPV6_PKTOPTIONS
1209 does not look very well thought out. For now we latch the
1210 options received in the last packet enqueued
1211 by tcp. Feel free to propose a better solution.
1215 opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));
1217 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1218 struct dst_entry *dst = sk->sk_rx_dst;
1220 sock_rps_save_rxhash(sk, skb);
1221 sk_mark_napi_id(sk, skb);
1223 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1224 dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1226 sk->sk_rx_dst = NULL;
1230 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1232 goto ipv6_pktoptions;
1236 if (tcp_checksum_complete(skb))
1239 if (sk->sk_state == TCP_LISTEN) {
1240 struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1246 sock_rps_save_rxhash(nsk, skb);
1247 sk_mark_napi_id(nsk, skb);
1248 if (tcp_child_process(sk, nsk, skb))
1251 __kfree_skb(opt_skb);
1255 sock_rps_save_rxhash(sk, skb);
1257 if (tcp_rcv_state_process(sk, skb))
1260 goto ipv6_pktoptions;
1264 tcp_v6_send_reset(sk, skb);
1267 __kfree_skb(opt_skb);
1271 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1272 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1277 /* You may ask, what is this?
1279 1. skb was enqueued by tcp.
1280 2. skb is added to the tail of the read queue, rather than out of order.
1281 3. The socket is not in a passive state.
1282 4. Finally, it really contains options that the user wants to receive.
1285 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1286 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1287 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1288 np->mcast_oif = tcp_v6_iif(opt_skb);
1289 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1290 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1291 if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1292 np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1294 np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1295 if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1296 skb_set_owner_r(opt_skb, sk);
1297 opt_skb = xchg(&np->pktoptions, opt_skb);
1299 __kfree_skb(opt_skb);
1300 opt_skb = xchg(&np->pktoptions, NULL);
1308 static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1309 const struct tcphdr *th)
1311 /* This is tricky: we move IP6CB to its correct location inside
1312 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1313 * _decode_session6() uses IP6CB().
1314 * barrier() makes sure compiler won't play aliasing games.
1316 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1317 sizeof(struct inet6_skb_parm));
1320 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1321 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1322 skb->len - th->doff*4);
1323 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1324 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1325 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1326 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1327 TCP_SKB_CB(skb)->sacked = 0;
1330 static void tcp_v6_restore_cb(struct sk_buff *skb)
1332 /* We need to move header back to the beginning if xfrm6_policy_check()
1333 * and tcp_v6_fill_cb() are going to be called again.
1335 memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1336 sizeof(struct inet6_skb_parm));
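/* Main receive path: validate the header, look the socket up in the
 * established/listening hashes, check xfrm policy and MD5, and either
 * process the segment directly, queue it to the prequeue, or push it onto
 * the owner's backlog.
 */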
1339 static int tcp_v6_rcv(struct sk_buff *skb)
1341 const struct tcphdr *th;
1342 const struct ipv6hdr *hdr;
1345 struct net *net = dev_net(skb->dev);
1347 if (skb->pkt_type != PACKET_HOST)
1351 * Count it even if it's bad.
1353 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1355 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1360 if (th->doff < sizeof(struct tcphdr)/4)
1362 if (!pskb_may_pull(skb, th->doff*4))
1365 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1369 hdr = ipv6_hdr(skb);
1372 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
1378 if (sk->sk_state == TCP_TIME_WAIT)
1381 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1382 struct request_sock *req = inet_reqsk(sk);
1383 struct sock *nsk = NULL;
1385 sk = req->rsk_listener;
1386 tcp_v6_fill_cb(skb, hdr, th);
1387 if (tcp_v6_inbound_md5_hash(sk, skb)) {
1391 if (likely(sk->sk_state == TCP_LISTEN)) {
1392 nsk = tcp_check_req(sk, skb, req, false);
1394 inet_csk_reqsk_queue_drop_and_put(sk, req);
1404 tcp_v6_restore_cb(skb);
1405 } else if (tcp_child_process(sk, nsk, skb)) {
1406 tcp_v6_send_reset(nsk, skb);
1412 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1413 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1414 goto discard_and_relse;
1417 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1418 goto discard_and_relse;
1420 tcp_v6_fill_cb(skb, hdr, th);
1422 if (tcp_v6_inbound_md5_hash(sk, skb))
1423 goto discard_and_relse;
1425 if (sk_filter(sk, skb))
1426 goto discard_and_relse;
1430 if (sk->sk_state == TCP_LISTEN) {
1431 ret = tcp_v6_do_rcv(sk, skb);
1432 goto put_and_return;
1435 sk_incoming_cpu_update(sk);
1437 bh_lock_sock_nested(sk);
1438 tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
1440 if (!sock_owned_by_user(sk)) {
1441 if (!tcp_prequeue(sk, skb))
1442 ret = tcp_v6_do_rcv(sk, skb);
1443 } else if (unlikely(sk_add_backlog(sk, skb,
1444 sk->sk_rcvbuf + sk->sk_sndbuf))) {
1446 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1447 goto discard_and_relse;
1453 return ret ? -1 : 0;
1456 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1459 tcp_v6_fill_cb(skb, hdr, th);
1461 if (tcp_checksum_complete(skb)) {
1463 TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1465 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1467 tcp_v6_send_reset(NULL, skb);
1479 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1480 inet_twsk_put(inet_twsk(sk));
1484 tcp_v6_fill_cb(skb, hdr, th);
1486 if (tcp_checksum_complete(skb)) {
1487 inet_twsk_put(inet_twsk(sk));
1491 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1496 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1497 &ipv6_hdr(skb)->saddr, th->source,
1498 &ipv6_hdr(skb)->daddr,
1499 ntohs(th->dest), tcp_v6_iif(skb));
1501 struct inet_timewait_sock *tw = inet_twsk(sk);
1502 inet_twsk_deschedule_put(tw);
1504 tcp_v6_restore_cb(skb);
1507 /* Fall through to ACK */
1510 tcp_v6_timewait_ack(sk, skb);
1513 tcp_v6_restore_cb(skb);
1515 case TCP_TW_SUCCESS:
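/* Early demux: on the hot receive path, try to find an established socket
 * before routing so that its cached rx dst can be attached to the skb and a
 * full route lookup avoided.
 */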
1521 static void tcp_v6_early_demux(struct sk_buff *skb)
1523 const struct ipv6hdr *hdr;
1524 const struct tcphdr *th;
1527 if (skb->pkt_type != PACKET_HOST)
1530 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1533 hdr = ipv6_hdr(skb);
1536 if (th->doff < sizeof(struct tcphdr) / 4)
1539 /* Note : We use inet6_iif() here, not tcp_v6_iif() */
1540 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1541 &hdr->saddr, th->source,
1542 &hdr->daddr, ntohs(th->dest),
1546 skb->destructor = sock_edemux;
1547 if (sk_fullsock(sk)) {
1548 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1551 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1553 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1554 skb_dst_set_noref(skb, dst);
1559 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1560 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1561 .twsk_unique = tcp_twsk_unique,
1562 .twsk_destructor = tcp_twsk_destructor,
1565 static const struct inet_connection_sock_af_ops ipv6_specific = {
1566 .queue_xmit = inet6_csk_xmit,
1567 .send_check = tcp_v6_send_check,
1568 .rebuild_header = inet6_sk_rebuild_header,
1569 .sk_rx_dst_set = inet6_sk_rx_dst_set,
1570 .conn_request = tcp_v6_conn_request,
1571 .syn_recv_sock = tcp_v6_syn_recv_sock,
1572 .net_header_len = sizeof(struct ipv6hdr),
1573 .net_frag_header_len = sizeof(struct frag_hdr),
1574 .setsockopt = ipv6_setsockopt,
1575 .getsockopt = ipv6_getsockopt,
1576 .addr2sockaddr = inet6_csk_addr2sockaddr,
1577 .sockaddr_len = sizeof(struct sockaddr_in6),
1578 .bind_conflict = inet6_csk_bind_conflict,
1579 #ifdef CONFIG_COMPAT
1580 .compat_setsockopt = compat_ipv6_setsockopt,
1581 .compat_getsockopt = compat_ipv6_getsockopt,
1583 .mtu_reduced = tcp_v6_mtu_reduced,
1586 #ifdef CONFIG_TCP_MD5SIG
1587 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1588 .md5_lookup = tcp_v6_md5_lookup,
1589 .calc_md5_hash = tcp_v6_md5_hash_skb,
1590 .md5_parse = tcp_v6_parse_md5_keys,
1595 * TCP over IPv4 via INET6 API
1597 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1598 .queue_xmit = ip_queue_xmit,
1599 .send_check = tcp_v4_send_check,
1600 .rebuild_header = inet_sk_rebuild_header,
1601 .sk_rx_dst_set = inet_sk_rx_dst_set,
1602 .conn_request = tcp_v6_conn_request,
1603 .syn_recv_sock = tcp_v6_syn_recv_sock,
1604 .net_header_len = sizeof(struct iphdr),
1605 .setsockopt = ipv6_setsockopt,
1606 .getsockopt = ipv6_getsockopt,
1607 .addr2sockaddr = inet6_csk_addr2sockaddr,
1608 .sockaddr_len = sizeof(struct sockaddr_in6),
1609 .bind_conflict = inet6_csk_bind_conflict,
1610 #ifdef CONFIG_COMPAT
1611 .compat_setsockopt = compat_ipv6_setsockopt,
1612 .compat_getsockopt = compat_ipv6_getsockopt,
1614 .mtu_reduced = tcp_v4_mtu_reduced,
1617 #ifdef CONFIG_TCP_MD5SIG
1618 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1619 .md5_lookup = tcp_v4_md5_lookup,
1620 .calc_md5_hash = tcp_v4_md5_hash_skb,
1621 .md5_parse = tcp_v6_parse_md5_keys,
1625 /* NOTE: A lot of things are set to zero explicitly by the call to
1626 * sk_alloc(), so they need not be done here.
1628 static int tcp_v6_init_sock(struct sock *sk)
1630 struct inet_connection_sock *icsk = inet_csk(sk);
1634 icsk->icsk_af_ops = &ipv6_specific;
1636 #ifdef CONFIG_TCP_MD5SIG
1637 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1643 static void tcp_v6_destroy_sock(struct sock *sk)
1645 tcp_v4_destroy_sock(sk);
1646 inet6_destroy_sock(sk);
1649 #ifdef CONFIG_PROC_FS
1650 /* Proc filesystem TCPv6 sock list dumping. */
1651 static void get_openreq6(struct seq_file *seq,
1652 const struct request_sock *req, int i)
1654 long ttd = req->rsk_timer.expires - jiffies;
1655 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1656 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1662 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1663 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1665 src->s6_addr32[0], src->s6_addr32[1],
1666 src->s6_addr32[2], src->s6_addr32[3],
1667 inet_rsk(req)->ir_num,
1668 dest->s6_addr32[0], dest->s6_addr32[1],
1669 dest->s6_addr32[2], dest->s6_addr32[3],
1670 ntohs(inet_rsk(req)->ir_rmt_port),
1672 0, 0, /* could print option size, but that is af dependent. */
1673 1, /* timers active (only the expire timer) */
1674 jiffies_to_clock_t(ttd),
1676 from_kuid_munged(seq_user_ns(seq),
1677 sock_i_uid(req->rsk_listener)),
1678 0, /* non standard timer */
1679 0, /* open_requests have no inode */
1683 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1685 const struct in6_addr *dest, *src;
1688 unsigned long timer_expires;
1689 const struct inet_sock *inet = inet_sk(sp);
1690 const struct tcp_sock *tp = tcp_sk(sp);
1691 const struct inet_connection_sock *icsk = inet_csk(sp);
1692 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
1696 dest = &sp->sk_v6_daddr;
1697 src = &sp->sk_v6_rcv_saddr;
1698 destp = ntohs(inet->inet_dport);
1699 srcp = ntohs(inet->inet_sport);
1701 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1703 timer_expires = icsk->icsk_timeout;
1704 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1706 timer_expires = icsk->icsk_timeout;
1707 } else if (timer_pending(&sp->sk_timer)) {
1709 timer_expires = sp->sk_timer.expires;
1712 timer_expires = jiffies;
1715 state = sk_state_load(sp);
1716 if (state == TCP_LISTEN)
1717 rx_queue = sp->sk_ack_backlog;
1719 /* Because we don't lock the socket,
1720 * we might find a transient negative value.
1722 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1725 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1726 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1728 src->s6_addr32[0], src->s6_addr32[1],
1729 src->s6_addr32[2], src->s6_addr32[3], srcp,
1730 dest->s6_addr32[0], dest->s6_addr32[1],
1731 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1733 tp->write_seq - tp->snd_una,
1736 jiffies_delta_to_clock_t(timer_expires - jiffies),
1737 icsk->icsk_retransmits,
1738 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1739 icsk->icsk_probes_out,
1741 atomic_read(&sp->sk_refcnt), sp,
1742 jiffies_to_clock_t(icsk->icsk_rto),
1743 jiffies_to_clock_t(icsk->icsk_ack.ato),
1744 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1746 state == TCP_LISTEN ?
1747 fastopenq->max_qlen :
1748 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1752 static void get_timewait6_sock(struct seq_file *seq,
1753 struct inet_timewait_sock *tw, int i)
1755 long delta = tw->tw_timer.expires - jiffies;
1756 const struct in6_addr *dest, *src;
1759 dest = &tw->tw_v6_daddr;
1760 src = &tw->tw_v6_rcv_saddr;
1761 destp = ntohs(tw->tw_dport);
1762 srcp = ntohs(tw->tw_sport);
1765 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1766 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1768 src->s6_addr32[0], src->s6_addr32[1],
1769 src->s6_addr32[2], src->s6_addr32[3], srcp,
1770 dest->s6_addr32[0], dest->s6_addr32[1],
1771 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1772 tw->tw_substate, 0, 0,
1773 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1774 atomic_read(&tw->tw_refcnt), tw);
1777 static int tcp6_seq_show(struct seq_file *seq, void *v)
1779 struct tcp_iter_state *st;
1780 struct sock *sk = v;
1782 if (v == SEQ_START_TOKEN) {
1787 "st tx_queue rx_queue tr tm->when retrnsmt"
1788 " uid timeout inode\n");
1793 if (sk->sk_state == TCP_TIME_WAIT)
1794 get_timewait6_sock(seq, v, st->num);
1795 else if (sk->sk_state == TCP_NEW_SYN_RECV)
1796 get_openreq6(seq, v, st->num);
1798 get_tcp6_sock(seq, v, st->num);
1803 static const struct file_operations tcp6_afinfo_seq_fops = {
1804 .owner = THIS_MODULE,
1805 .open = tcp_seq_open,
1807 .llseek = seq_lseek,
1808 .release = seq_release_net
1811 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1814 .seq_fops = &tcp6_afinfo_seq_fops,
1816 .show = tcp6_seq_show,
1820 int __net_init tcp6_proc_init(struct net *net)
1822 return tcp_proc_register(net, &tcp6_seq_afinfo);
1825 void tcp6_proc_exit(struct net *net)
1827 tcp_proc_unregister(net, &tcp6_seq_afinfo);
1831 static void tcp_v6_clear_sk(struct sock *sk, int size)
1833 struct inet_sock *inet = inet_sk(sk);
1835 /* we do not want to clear pinet6 field, because of RCU lookups */
1836 sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));
1838 size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
1839 memset(&inet->pinet6 + 1, 0, size);
1842 struct proto tcpv6_prot = {
1844 .owner = THIS_MODULE,
1846 .connect = tcp_v6_connect,
1847 .disconnect = tcp_disconnect,
1848 .accept = inet_csk_accept,
1850 .init = tcp_v6_init_sock,
1851 .destroy = tcp_v6_destroy_sock,
1852 .shutdown = tcp_shutdown,
1853 .setsockopt = tcp_setsockopt,
1854 .getsockopt = tcp_getsockopt,
1855 .recvmsg = tcp_recvmsg,
1856 .sendmsg = tcp_sendmsg,
1857 .sendpage = tcp_sendpage,
1858 .backlog_rcv = tcp_v6_do_rcv,
1859 .release_cb = tcp_release_cb,
1861 .unhash = inet_unhash,
1862 .get_port = inet_csk_get_port,
1863 .enter_memory_pressure = tcp_enter_memory_pressure,
1864 .stream_memory_free = tcp_stream_memory_free,
1865 .sockets_allocated = &tcp_sockets_allocated,
1866 .memory_allocated = &tcp_memory_allocated,
1867 .memory_pressure = &tcp_memory_pressure,
1868 .orphan_count = &tcp_orphan_count,
1869 .sysctl_mem = sysctl_tcp_mem,
1870 .sysctl_wmem = sysctl_tcp_wmem,
1871 .sysctl_rmem = sysctl_tcp_rmem,
1872 .max_header = MAX_TCP_HEADER,
1873 .obj_size = sizeof(struct tcp6_sock),
1874 .slab_flags = SLAB_DESTROY_BY_RCU,
1875 .twsk_prot = &tcp6_timewait_sock_ops,
1876 .rsk_prot = &tcp6_request_sock_ops,
1877 .h.hashinfo = &tcp_hashinfo,
1878 .no_autobind = true,
1879 #ifdef CONFIG_COMPAT
1880 .compat_setsockopt = compat_tcp_setsockopt,
1881 .compat_getsockopt = compat_tcp_getsockopt,
1883 #ifdef CONFIG_MEMCG_KMEM
1884 .proto_cgroup = tcp_proto_cgroup,
1886 .clear_sk = tcp_v6_clear_sk,
1889 static const struct inet6_protocol tcpv6_protocol = {
1890 .early_demux = tcp_v6_early_demux,
1891 .handler = tcp_v6_rcv,
1892 .err_handler = tcp_v6_err,
1893 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1896 static struct inet_protosw tcpv6_protosw = {
1897 .type = SOCK_STREAM,
1898 .protocol = IPPROTO_TCP,
1899 .prot = &tcpv6_prot,
1900 .ops = &inet6_stream_ops,
1901 .flags = INET_PROTOSW_PERMANENT |
1905 static int __net_init tcpv6_net_init(struct net *net)
1907 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1908 SOCK_RAW, IPPROTO_TCP, net);
1911 static void __net_exit tcpv6_net_exit(struct net *net)
1913 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
1916 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
1918 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
1921 static struct pernet_operations tcpv6_net_ops = {
1922 .init = tcpv6_net_init,
1923 .exit = tcpv6_net_exit,
1924 .exit_batch = tcpv6_net_exit_batch,
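/* Module init/exit: register the IPPROTO_TCP inet6 protocol handler, the
 * SOCK_STREAM protosw entry and the per-namespace control socket, undoing
 * the registrations in reverse order on failure or exit.
 */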
1927 int __init tcpv6_init(void)
1931 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1935 /* register inet6 protocol */
1936 ret = inet6_register_protosw(&tcpv6_protosw);
1938 goto out_tcpv6_protocol;
1940 ret = register_pernet_subsys(&tcpv6_net_ops);
1942 goto out_tcpv6_protosw;
1947 inet6_unregister_protosw(&tcpv6_protosw);
1949 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1953 void tcpv6_exit(void)
1955 unregister_pernet_subsys(&tcpv6_net_ops);
1956 inet6_unregister_protosw(&tcpv6_protosw);
1957 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);