 *	Linux INET6 implementation
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req);

static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
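/* Cache the inbound route on the socket: remember the incoming interface
 * index and the FIB node serial number as a validity cookie, so the
 * established fast path can keep reusing the dst until the routing table
 * changes (see the dst->ops->check() call in tcp_v6_do_rcv()).
 */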
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
	struct dst_entry *dst = skb_dst(skb);
	const struct rt6_info *rt = (const struct rt6_info *)dst;

	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
static void tcp_v6_hash(struct sock *sk)
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
		__inet6_hash(sk, NULL);
static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->source);
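/* Active open.  Roughly: validate the destination sockaddr, handle flow
 * labels and the IPv4-mapped fallback to tcp_v4_connect(), look up a route
 * for the flow and pick a source address, hash the socket onto a local
 * port, and finally build and send the SYN:
 *
 *	connect(fd, ...)
 *	  -> tcp_v6_connect()
 *	       -> ip6_dst_lookup_flow()    (route + source address)
 *	       -> inet6_hash_connect()     (pick an ephemeral port)
 *	       -> tcp_connect()            (transmit the SYN)
 */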
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct dst_entry *dst;

	if (addr_len < SIN6_LEN_RFC2133)

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
	IP6_ECN_flow_init(fl6.flowlabel);
	if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
		struct ip6_flowlabel *flowlabel;
		flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
		if (flowlabel == NULL)
		fl6_sock_release(flowlabel);

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */
	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)

			sk->sk_bound_dev_if = usin->sin6_scope_id;

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;
	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;

		ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
		ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
				       &sk->sk_v6_rcv_saddr);

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;
	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);

		sk->sk_v6_rcv_saddr = *saddr;

	/* set the source address */
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,

	err = tcp_connect(sk);

	tcp_set_state(sk, TCP_CLOSE);
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
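/* PMTU shrink handling: re-route using the new MTU recorded in
 * tp->mtu_info and, if the cached MSS is now too large, resync the MSS
 * and retransmit outstanding data.  Invoked directly from tcp_v6_err()
 * when the socket is not owned by a user context, otherwise deferred via
 * the TCP_MTU_REDUCED_DEFERRED flag.
 */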
static void tcp_v6_mtu_reduced(struct sock *sk)
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
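/* ICMPv6 error handler: map the quoted TCP header back to the owning
 * socket, discard errors whose sequence number falls outside the current
 * send window, and then dispatch on the ICMP type: NDISC redirects,
 * packet-too-big (PMTU), or a hard error converted with
 * icmpv6_err_convert() and reported to the application.
 */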
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
	struct ipv6_pinfo *np;
	struct request_sock *fastopen;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			  th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));

	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);

	seq = ntohl(th->seq);
	/* XXX (TFO) - tp->snd_una should be the ISN (tcp_create_openreq_child()) */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

			dst->ops->redirect(dst, sk, skb);

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)

		if (!ip6_sk_accept_pmtu(sk))

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
		if (sock_owned_by_user(sk))

		/* Note : We use inet6_iif() here, not tcp_v6_iif() */
		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);

		inet_csk_reqsk_queue_drop(sk, req, prev);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);

		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && fastopen->sk == NULL)

		if (!sock_owned_by_user(sk)) {
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */
			sk->sk_err_soft = err;

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_error_report(sk);
		sk->sk_err_soft = err;
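/* Build and transmit a SYN-ACK for a pending request sock: grab a route if
 * the caller did not supply one, let tcp_make_synack() construct the
 * segment, checksum it against the IPv6 pseudo-header and push it out with
 * ip6_xmit().
 */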
static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc)
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 *fl6 = &fl->u.ip6;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)

	skb = tcp_make_synack(sk, dst, req, foc);

		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && (ireq->pktopts != NULL))
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
		err = net_xmit_eval(err);

static void tcp_v6_reqsk_destructor(struct request_sock *req)
	kfree_skb(inet_rsk(req)->pktopts);
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
	return tcp_v6_md5_do_lookup(sk, &inet_rsk(req)->ir_v6_rmt_addr);

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))

	if (copy_from_user(&cmd, optval, sizeof(cmd)))

	if (sin6->sin6_family != AF_INET6)

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
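/* TCP-MD5 (RFC 2385) signatures are computed over a pseudo-header followed
 * by the TCP header, the payload and the key.  For IPv6 the pseudo-header
 * lives in hp->md5_blk.ip6 and holds (roughly) the source address, the
 * destination address, the covered length and the zero-padded protocol
 * number; the helper below fills it in and feeds it to the per-cpu MD5
 * hash descriptor.
 */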
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));

static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
	if (tcp_md5_hash_header(hp, th))
	if (tcp_md5_hash_key(hp, key))
	if (crypto_hash_final(desc, md5_hash))

	tcp_put_md5sig_pool();

	tcp_put_md5sig_pool();
	memset(md5_hash, 0, 16);
static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct request_sock *req,
			       const struct sk_buff *skb)
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

		saddr = &inet6_sk(sk)->saddr;
		daddr = &sk->sk_v6_daddr;
		saddr = &inet_rsk(req)->ir_v6_loc_addr;
		daddr = &inet_rsk(req)->ir_v6_rmt_addr;
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;

	hp = tcp_get_md5sig_pool();
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
	if (tcp_md5_hash_header(hp, th))
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
	if (tcp_md5_hash_key(hp, key))
	if (crypto_hash_final(desc, md5_hash))

	tcp_put_md5sig_pool();

	tcp_put_md5sig_pool();
	memset(md5_hash, 0, 16);
static int __tcp_v6_inbound_md5_hash(struct sock *sk,
				     const struct sk_buff *skb)
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));

static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
	ret = __tcp_v6_inbound_md5_hash(sk, skb);
static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	ireq->ir_iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);

static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl,
					  const struct request_sock *req,
	return inet6_csk_route_req(sk, &fl->u.ip6, req);

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_sequence,
	.send_synack	=	tcp_v6_send_synack,
	.queue_hash_add =	inet6_csk_reqsk_queue_hash_add,
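/* Common helper for unattached replies (RST and the ACKs sent on behalf of
 * time-wait and request socks): build a bare TCP header with optional
 * timestamp and MD5 options, swap the addresses and ports taken from the
 * packet being answered, and transmit it on the per-netns control socket.
 */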
static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, u32 label)
	const struct tcphdr *th = tcp_hdr(skb);
	struct sk_buff *buff;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;

		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
		tot_len += TCPOLEN_MD5SIG_ALIGNED;

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);

#ifdef CONFIG_TCP_MD5SIG
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
		fl6.flowi6_oif = oif;
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
	/* Pass a socket to ip6_dst_lookup even if it is for a RST;
	 * the underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);

		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	struct sock *sk1 = NULL;

	/* If sk is not NULL, it means we did a successful lookup and the
	 * incoming route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We do not lose security here:
		 * the incoming packet is checked against the md5 hash using
		 * the found key, and no RST is generated if the hash doesn't
		 * match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, &ipv6h->saddr,
					    th->source, &ipv6h->daddr,
					    ntohs(th->source), tcp_v6_iif(skb));

		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)

		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;

		seq = ntohl(th->ack_seq);
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
#ifdef CONFIG_TCP_MD5SIG

static void tcp_v6_send_ack(struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, (tw->tw_flowlabel << 12));

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
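/* For a segment arriving on a listener: first look for a matching request
 * sock (half-open connection), then for an already established child that
 * has not been accepted yet, and finally fall back to SYN cookies when
 * they are enabled.  Returns the socket the segment should be processed on
 * (possibly the listener itself), or NULL to drop it.
 */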
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
		return tcp_check_req(sk, skb, req, prev, false);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
					 &ipv6_hdr(skb)->saddr, th->source,
					 &ipv6_hdr(skb)->daddr, ntohs(th->dest),

		if (nsk->sk_state != TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(nsk));

#ifdef CONFIG_SYN_COOKIES
		sk = cookie_v6_check(sk, skb);

static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0; /* don't send reset */
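/* Create the child socket once the three-way handshake completes.  The
 * ETH_P_IP branch handles connections accepted over an IPv4-mapped
 * address: tcp_v4_syn_recv_sock() does the real work and the child is then
 * patched to use the mapped ops (ipv6_mapped, tcp_v4_do_rcv).  The native
 * path routes the request, clones the listener into a child with
 * tcp_create_openreq_child(), copies the addresses, IPv6 options and any
 * MD5 key, and finally inherits the bound port and hashes the new socket.
 */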
static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;

	if (skb->protocol == htons(ETH_P_IP)) {

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		newsk->sk_v6_rcv_saddr = newnp->saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions = NULL;
		newnp->mcast_oif = tcp_v6_iif(skb);
		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */
		/* It is a tricky place. Until this moment the IPv4 code
		 * worked with the IPv6 icsk.icsk_af_ops.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))

		dst = inet6_csk_route_req(sk, &fl6, req);

	newsk = tcp_create_openreq_child(sk, req, skb);

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	ip6_set_txhash(newsk);

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (ireq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(ireq->pktopts,
					      sk_gfp_atomic(sk, GFP_ATOMIC));
		consume_skb(ireq->pktopts);
		ireq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);

	newnp->mcast_oif = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
	/* Clone native IPv6 options from the listening socket (if any).

	   Yes, keeping a reference count would be much more clever, but we
	   do one more thing here: reattach optmem to newsk.
	 */
		newnp->opt = ipv6_dup_options(newsk, np->opt);

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);
	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_atomic(sk, GFP_ATOMIC));

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
	__inet6_hash(newsk, NULL);

	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);

	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
/* The socket must have its spinlock held when we get here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))

	/*
	 * socket locking is here for SMP purposes as backlog rcv
	 * is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code where we
	   may make it not affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, the whole idea behind IPV6_PKTOPTIONS
	   looks not very well thought out. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose a better solution.
	 */
		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				sk->sk_rx_dst = NULL;

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
			goto ipv6_pktoptions;

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket.
		 */
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(sk, skb);
			if (tcp_child_process(sk, nsk, skb))
				__kfree_skb(opt_skb);

	sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto ipv6_pktoptions;

	tcp_v6_send_reset(sk, skb);
		__kfree_skb(opt_skb);

	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);

	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
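/* Main receive path, called from the IPv6 protocol handler.  In outline:
 * pull and validate the TCP header, initialise the checksum, look up the
 * owning socket, apply the hop-limit, XFRM policy and inbound MD5 checks,
 * and then either process the segment directly, queue it on the prequeue,
 * or park it on the backlog when the socket is owned by a user context.
 * TIME_WAIT sockets are handled separately at the bottom.
 */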
static int tcp_v6_rcv(struct sk_buff *skb)
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))

	if (th->doff < sizeof(struct tcphdr) / 4)
	if (!pskb_may_pull(skb, th->doff * 4))

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))

	hdr = ipv6_hdr(skb);

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,

	if (sk->sk_state == TCP_TIME_WAIT)

	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	tcp_v6_fill_cb(skb, hdr, th);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);

	if (!sock_owned_by_user(sk)) {
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;

	return ret ? -1 : 0;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))

	tcp_v6_fill_cb(skb, hdr, th);

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	tcp_v6_send_reset(NULL, skb);

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));

	tcp_v6_fill_cb(skb, hdr, th);

	if (skb->len < (th->doff << 2)) {
		inet_twsk_put(inet_twsk(sk));
	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb));
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
		/* Fall through to ACK */
		tcp_v6_timewait_ack(sk, skb);
	case TCP_TW_SUCCESS:
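/* Early demultiplexing: before routing, try to match the packet to an
 * established socket and reuse the dst cached on that socket (validated
 * against rx_dst_cookie), which avoids a full route lookup for the common
 * case of bulk transfer on an existing connection.
 */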
static void tcp_v6_early_demux(struct sk_buff *skb)
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;

	if (skb->pkt_type != PACKET_HOST)

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))

	hdr = ipv6_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),

		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
			struct dst_entry *dst = sk->sk_rx_dst;

				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	 = sizeof(struct tcp6_timewait_sock),
	.twsk_unique	 = tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
	.mtu_reduced	   = tcp_v6_mtu_reduced,

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	= tcp_v6_md5_lookup,
	.calc_md5_hash	= tcp_v6_md5_hash_skb,
	.md5_parse	= tcp_v6_parse_md5_keys,

/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
	.mtu_reduced	   = tcp_v4_mtu_reduced,

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v6_parse_md5_keys,
/* NOTE: A lot of things are set to zero explicitly by the call to
 * sk_alloc(), so they need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;

static void tcp_v6_destroy_sock(struct sock *sk)
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
	int ttd = req->expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   from_kuid_munged(seq_user_ns(seq), uid),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
	const struct in6_addr *dest, *src;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_expires = icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_expires = icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_expires = sp->sk_timer.expires;
		timer_expires = jiffies;

		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tp->write_seq - tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   sp->sk_state == TCP_LISTEN ?
			(fastopenq ? fastopenq->max_qlen : 0) :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
	const struct in6_addr *dest, *src;
	s32 delta = tw->tw_ttd - inet_tw_time_stamp();

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
static int tcp6_seq_show(struct seq_file *seq, void *v)
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
			   "st tx_queue rx_queue tr tm->when retrnsmt"
			   " uid timeout inode\n");

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (sk->sk_state == TCP_TIME_WAIT)
			get_timewait6_sock(seq, v, st->num);
			get_tcp6_sock(seq, v, st->num);
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);

static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= tcp_seq_open,
	.llseek		= seq_lseek,
	.release	= seq_release_net

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.show		= tcp6_seq_show,
int __net_init tcp6_proc_init(struct net *net)
	return tcp_proc_register(net, &tcp6_seq_afinfo);

void tcp6_proc_exit(struct net *net)
	tcp_proc_unregister(net, &tcp6_seq_afinfo);

static void tcp_v6_clear_sk(struct sock *sk, int size)
	struct inet_sock *inet = inet_sk(sk);

	/* we do not want to clear pinet6 field, because of RCU lookups */
	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);
struct proto tcpv6_prot = {
	.owner			= THIS_MODULE,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#ifdef CONFIG_MEMCG_KMEM
	.proto_cgroup		= tcp_proto_cgroup,
	.clear_sk		= tcp_v6_clear_sk,
static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	= tcp_v6_early_demux,
	.handler	= tcp_v6_rcv,
	.err_handler	= tcp_v6_err,
	.flags		= INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,

static struct inet_protosw tcpv6_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_TCP,
	.prot		= &tcpv6_prot,
	.ops		= &inet6_stream_ops,
	.flags		= INET_PROTOSW_PERMANENT |

static int __net_init tcpv6_net_init(struct net *net)
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);

static void __net_exit tcpv6_net_exit(struct net *net)
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
int __init tcpv6_init(void)

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
		goto out_tcpv6_protosw;

	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);

void tcpv6_exit(void)
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);