/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif
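
/* When CONFIG_TCP_MD5SIG is disabled, the stub above makes every key
 * lookup return NULL, so callers below uniformly see "no key
 * configured" without needing further #ifdefs at each call site.
 */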
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}
static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}
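
/* The resulting ISN is a keyed hash of the connection 4-tuple plus a
 * clock component, in the spirit of RFC 6528: unpredictable off-path,
 * yet monotonic enough to keep reincarnated connections distinct.
 */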
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If an interface was set while binding, the
			 * indices must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connecting to a link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	fl6.flowi6_uid = sk->sk_uid;

	opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
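
/* Illustration, not kernel code: a minimal userspace sketch of the
 * link-local case validated above. The scope id names the egress
 * interface and must agree with any earlier SO_BINDTODEVICE binding;
 * the address, port and interface name used here are hypothetical.
 *
 *	struct sockaddr_in6 a = { .sin6_family = AF_INET6,
 *				  .sin6_port = htons(8080) };
 *	inet_pton(AF_INET6, "fe80::1", &a.sin6_addr);
 *	a.sin6_scope_id = if_nametoindex("eth0");
 *	connect(fd, (struct sockaddr *)&a, sizeof(a));
 */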
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex);

	if (!sk) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq, fatal);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			sock_hold(sk);
		goto out;
	}

	/* Might be for a request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      bool attach_req)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, attach_req);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		rcu_read_lock();
		err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
			       np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}
static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet_rsk(req)->pktopts);
}
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}
static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}
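
/* Illustration, not kernel code: a userspace sketch of configuring a
 * key that the parser above accepts. struct tcp_md5sig and TCP_MD5SIG
 * come from <linux/tcp.h>; the peer address, key bytes and socket fd
 * are hypothetical.
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	memcpy(&md5.tcpm_addr, &peer_sin6, sizeof(peer_sin6));
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * Passing tcpm_keylen == 0 deletes the key for that peer, matching the
 * tcp_md5_do_del() branch above.
 */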
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
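
/* The block hashed above mirrors the RFC 2460 section 8.1 pseudo-header:
 * the 16-byte source and destination addresses, the 32-bit upper-layer
 * payload length, and the next-header value (IPPROTO_TCP) widened to
 * 32 bits, in that order.
 */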
static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
#endif
	return false;
}
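
/* Net effect of the checks above: a segment is dropped (true) when
 * exactly one side has MD5 configured, or when both do but the digest
 * does not verify; it is accepted (false) only when neither side uses
 * MD5 or the received digest matches the expected key.
 */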
static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk_listener->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
}
static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	if (strict)
		*strict = true;
	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_sequence,
	.send_synack	=	tcp_v6_send_synack,
};
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, u32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}
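	/* The first word written above packs, per the RFC 793 option
	 * encoding: NOP (1), NOP (1), kind TCPOPT_TIMESTAMP (8) and
	 * length TCPOLEN_TIMESTAMP (10); the two NOPs pad the 10-byte
	 * option out to the 12-byte, 32-bit-aligned block that the
	 * TSval/TSecr words complete.
	 */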
#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else
		fl6.flowi6_oif = oif;
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup_flow even when sending a RST;
	 * the underlying function uses it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}
static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We do not lose security here:
		 * the incoming packet is checked against the md5 hash with
		 * the found key; no RST is generated if the hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					   &tcp_hashinfo, &ipv6h->saddr,
					   th->source, &ipv6h->daddr,
					   ntohs(th->source), tcp_v6_iif(skb));
		if (!sk1)
			return;

		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    u32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

	inet_twsk_put(tw);
}
static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			0, 0);
}
static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
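
/* With syncookies, a listener that dropped its request sock state can
 * still validate a returning ACK: the cookie encoded in the sequence
 * number is checked by cookie_v6_check(), which may rebuild the child
 * socket. SYN segments (th->syn set) never carry a cookie and pass
 * through unchanged.
 */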
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0; /* don't send reset */
}
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);

		if (!newsk)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = tcp_v6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->opt	  = NULL;
	newnp->mcast_oif  = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from the listening socket (if any)

	   Yes, keeping a reference count would be much more clever,
	   but we do one more thing here: reattach optmem to newsk.
	 */
	opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_atomic(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req) {
		tcp_move_syn(newtp, req);

		/* Clone pktoptions received with SYN, if we own the req */
		if (ireq->pktopts) {
			newnp->pktoptions = skb_clone(ireq->pktopts,
						      sk_gfp_atomic(sk, GFP_ATOMIC));
			consume_skb(ireq->pktopts);
			ireq->pktopts = NULL;
			if (newnp->pktoptions)
				skb_set_owner_r(newnp->pktoptions, newsk);
		}
	}

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}
static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}
/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but that is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (tcp_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, the whole idea behind IPV6_PKTOPTIONS
	   does not look very well thought out. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_cookie_check(sk, skb);

		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			tcp_v6_restore_cb(opt_skb);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
}
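
/* Worked example for the end_seq arithmetic above: a segment with
 * seq 1000, a 20-byte header (doff = 5), skb->len 140 and no SYN/FIN
 * carries 120 payload bytes, so end_seq is
 * 1000 + 0 + 0 + 140 - 20 = 1120, one past the last byte consumed.
 */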
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);

lookup:
	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
				inet6_iif(skb));
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk;

		sk = req->rsk_listener;
		tcp_v6_fill_cb(skb, hdr, th);
		if (tcp_v6_inbound_md5_hash(sk, skb)) {
			reqsk_put(req);
			goto discard_it;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		sock_hold(sk);
		nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk) {
			reqsk_put(req);
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v6_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v6_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v6_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb));
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_restore_cb(skb);
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
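
/* Early demux can only hand the skb a cached route: the dst stored on
 * an established socket is reused only while its cookie is still valid
 * and the packet arrived on the ifindex the dst was learned from;
 * otherwise the normal input path recomputes the route.
 */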
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}
static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct request_sock *req, int i)
{
	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq),
				    sock_i_uid(req->rsk_listener)),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	int rx_queue;
	int state;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = sk_state_load(sp);
	if (state == TCP_LISTEN)
		rx_queue = sp->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   state,
		   tp->write_seq - tp->snd_una,
		   rx_queue,
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   state == TCP_LISTEN ?
			fastopenq->max_qlen :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait6_sock(seq, v, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq6(seq, v, st->num);
	else
		get_tcp6_sock(seq, v, st->num);
out:
	return 0;
}
static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};
int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
static void tcp_v6_clear_sk(struct sock *sk, int size)
{
	struct inet_sock *inet = inet_sk(sk);

	/* we do not want to clear pinet6 field, because of RCU lookups */
	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);
}
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.proto_cgroup		= tcp_proto_cgroup,
#endif
	.clear_sk		= tcp_v6_clear_sk,
	.diag_destroy		= tcp_abort,
};
static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}