/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					to a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   struct in6_addr *addr)
{
	return NULL;
}
#endif

static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}

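/* Note: a v4-mapped socket (icsk_af_ops == &ipv6_mapped above) lives in
 * the IPv4 hash tables, so tcp_prot.hash() is reused for it; only native
 * IPv6 sockets go into the IPv6 tables via __inet6_hash().
 */
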
static __inline__ __sum16 tcp_v6_check(int len,
				       struct in6_addr *saddr,
				       struct in6_addr *daddr,
				       __wsum base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

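/* csum_ipv6_magic() folds the RFC 2460 pseudo-header (source address,
 * destination address, upper-layer length, next header) into the partial
 * sum 'base'.  Verifying a received segment is then, roughly (a sketch,
 * not code from this file):
 *
 *	__wsum csum = csum_partial(th, len, 0);
 *	if (tcp_v6_check(len, saddr, daddr, csum))
 *		... non-zero folded sum means the segment is corrupt ...
 *
 * A result of zero means the checksum verifies.
 */
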
static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}

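/* The initial sequence number is derived from the full connection 4-tuple
 * plus a periodically rekeyed secret (RFC 1948 style), so ISNs are
 * unpredictable to off-path attackers while remaining monotonic enough
 * for TIME-WAIT recycling.
 */
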
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl, 0, sizeof(fl));

	if (np->sndflow) {
		fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl.fl6_flowlabel);
		if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl.fl6_flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &np->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src,
		       (saddr ? saddr : &np->saddr));
	fl.oif = sk->sk_bound_dev_if;
	fl.mark = sk->sk_mark;
	fl.fl_ip_dport = usin->sin6_port;
	fl.fl_ip_sport = inet->inet_sport;

	if (np->opt && np->opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	security_sk_classify_flow(sk, &fl);

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto failure;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
	if (err < 0) {
		if (err == -EREMOTE)
			err = ip6_dst_blackhole(sk, &dst, &fl);
		if (err < 0)
			goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl.fl6_src;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

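/* Example (userspace, illustrative only - not code from this file):
 * connecting an AF_INET6 socket to the v4-mapped address
 * ::ffff:192.0.2.1 takes the IPV6_ADDR_MAPPED branch above and hands the
 * socket over to tcp_v4_connect():
 *
 *	struct sockaddr_in6 a = { .sin6_family = AF_INET6,
 *				  .sin6_port   = htons(80) };
 *	inet_pton(AF_INET6, "::ffff:192.0.2.1", &a.sin6_addr);
 *	connect(fd, (struct sockaddr *)&a, sizeof(a));
 *
 * unless IPV6_V6ONLY is set, in which case -ENETUNREACH is returned.
 */
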
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi fl;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl, 0, sizeof(fl));
			fl.proto = IPPROTO_TCP;
			ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
			ipv6_addr_copy(&fl.fl6_src, &np->saddr);
			fl.oif = sk->sk_bound_dev_if;
			fl.mark = sk->sk_mark;
			fl.fl_ip_dport = inet->inet_dport;
			fl.fl_ip_sport = inet->inet_sport;
			security_skb_classify_flow(skb, &fl);

			if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
				sk->sk_err_soft = -err;
				goto out;
			}

			if ((err = xfrm_lookup(net, &dst, &fl, sk, 0)) < 0) {
				sk->sk_err_soft = -err;
				goto out;
			}

		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

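/* On ICMPV6_PKT_TOOBIG the new path MTU has already been recorded in the
 * destination cache by the ICMPv6 layer; the handler above only has to
 * shrink the cached MSS (tcp_sync_mss) and retransmit whatever no longer
 * fits (tcp_simple_retransmit).  All other ICMPv6 errors are converted to
 * an errno and either reported immediately or latched into sk_err_soft.
 */
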
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
			      struct request_values *rvp)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff * skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr * final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int err = -1;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
	ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
	fl.fl6_flowlabel = 0;
	fl.oif = treq->iif;
	fl.mark = sk->sk_mark;
	fl.fl_ip_dport = inet_rsk(req)->rmt_port;
	fl.fl_ip_sport = inet_rsk(req)->loc_port;
	security_req_classify_flow(req, &fl);

	opt = np->opt;
	if (opt && opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto done;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);
	if ((err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
		goto done;

	skb = tcp_make_synack(sk, dst, req, rvp);
	if (skb) {
		struct tcphdr *th = tcp_hdr(skb);

		th->check = tcp_v6_check(skb->len,
					 &treq->loc_addr, &treq->rmt_addr,
					 csum_partial(th, skb->len, skb->csum));

		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		err = ip6_xmit(sk, skb, &fl, opt, 0);
		err = net_xmit_eval(err);
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}

static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v6_send_synack(sk, req, rvp);
}

static inline void syn_flood_warning(struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies)
		printk(KERN_INFO
		       "TCPv6: Possible SYN flooding on port %d. "
		       "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
	else
#endif
		printk(KERN_INFO
		       "TCPv6: Possible SYN flooding on port %d. "
		       "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet6_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   struct in6_addr *addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	BUG_ON(tp == NULL);

	if (!tp->md5sig_info || !tp->md5sig_info->entries6)
		return NULL;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
			return &tp->md5sig_info->keys6[i].base;
	}
	return NULL;
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}

static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
			     char *newkey, u8 newkeylen)
{
	/* Add key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp6_md5sig_key *keys;

	key = tcp_v6_md5_do_lookup(sk, peer);
	if (key) {
		/* modify existing entry - just update that one */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		/* reallocate new list if current one is full. */
		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		}
		if (tcp_alloc_md5sig_pool(sk) == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
			keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
				       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);

			if (!keys) {
				tcp_free_md5sig_pool();
				kfree(newkey);
				return -ENOMEM;
			}

			if (tp->md5sig_info->entries6)
				memmove(keys, tp->md5sig_info->keys6,
					(sizeof (tp->md5sig_info->keys6[0]) *
					 tp->md5sig_info->entries6));

			kfree(tp->md5sig_info->keys6);
			tp->md5sig_info->keys6 = keys;
			tp->md5sig_info->alloced6++;
		}

		ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
			       peer);
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;

		tp->md5sig_info->entries6++;
	}
	return 0;
}

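/* Keys are kept in a flat array that grows by exactly one slot per
 * addition (alloced6 tracks capacity, entries6 the used count).  That is
 * adequate for the handful of peers TCP MD5 signatures (RFC 2385) are
 * typically configured for, e.g. BGP neighbors.
 */
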
static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, __u8 newkeylen)
{
	return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
				 newkey, newkeylen);
}

static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
			/* Free the key */
			kfree(tp->md5sig_info->keys6[i].base.key);
			tp->md5sig_info->entries6--;

			if (tp->md5sig_info->entries6 == 0) {
				kfree(tp->md5sig_info->keys6);
				tp->md5sig_info->keys6 = NULL;
				tp->md5sig_info->alloced6 = 0;
			} else {
				/* shrink the database */
				if (tp->md5sig_info->entries6 != i)
					memmove(&tp->md5sig_info->keys6[i],
						&tp->md5sig_info->keys6[i+1],
						(tp->md5sig_info->entries6 - i)
						* sizeof (tp->md5sig_info->keys6[0]));
			}
			tcp_free_md5sig_pool();
			return 0;
		}
	}
	return -ENOENT;
}

static void tcp_v6_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (tp->md5sig_info->entries6) {
		for (i = 0; i < tp->md5sig_info->entries6; i++)
			kfree(tp->md5sig_info->keys6[i].base.key);
		tp->md5sig_info->entries6 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys6);
	tp->md5sig_info->keys6 = NULL;
	tp->md5sig_info->alloced6 = 0;

	if (tp->md5sig_info->entries4) {
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys4);
	tp->md5sig_info->keys4 = NULL;
	tp->md5sig_info->alloced4 = 0;
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
		return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
		if (!p)
			return -ENOMEM;

		tp->md5sig_info = p;
		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
	if (!newkey)
		return -ENOMEM;
	if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
		return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
					 newkey, cmd.tcpm_keylen);
	}
	return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
}

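/* Userspace configures keys through setsockopt(TCP_MD5SIG); an
 * illustrative sketch (not code from this file):
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	memcpy(&md5.tcpm_addr, &peer_sin6, sizeof(peer_sin6));
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * A v4-mapped peer address routes the key into the IPv4 list above, and
 * tcpm_keylen == 0 deletes the key for that peer.
 */
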
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					struct in6_addr *daddr,
					struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	ipv6_addr_copy(&bp->saddr, saddr);
	ipv6_addr_copy(&bp->daddr, daddr);
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

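/* The tcp6_pseudohdr hashed above mirrors the RFC 2460 checksum
 * pseudo-header: 16 bytes saddr, 16 bytes daddr, 4 bytes length, 4 bytes
 * next-header - 40 bytes in total.  RFC 2385 digests exactly this block,
 * then the TCP header (options excluded, checksum taken as zero), then
 * the segment data, then the key, which is the call order in the two
 * hash functions below.
 */
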
static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       struct in6_addr *daddr, struct in6_addr *saddr,
			       struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       struct sock *sk, struct request_sock *req,
			       struct sk_buff *skb)
{
	struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else if (req) {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	} else {
		struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
{
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
			       genhash ? "failed" : "mismatch",
			       &ip6h->saddr, ntohs(th->source),
			       &ip6h->daddr, ntohs(th->dest));
		}
		return 1;
	}
	return 0;
}
#endif

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
};
#endif

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	 = sizeof(struct tcp6_timewait_sock),
	.twsk_unique	 = tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
					    csum_partial(th, th->doff<<2,
							 skb->csum));
	}
}

static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	ipv6h = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
				     IPPROTO_TCP, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;
	return 0;
}

static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	struct ipv6hdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

static int tcp6_gro_complete(struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;

	return tcp_gro_complete(skb);
}

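/* GRO merges several received segments into one large skb; since the
 * merged packet never existed on the wire, its checksum is rebuilt here
 * as an inverted pseudo-header sum, and the skb is marked SKB_GSO_TCPV6
 * so GSO can re-segment it later (e.g. on forwarding) if needed.
 */
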
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 ts, struct tcp_md5sig_key *key, int rst)
{
	struct tcphdr *th = tcp_hdr(skb), *t1;
	struct sk_buff *buff;
	struct flowi fl;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt++ = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	buff->csum = csum_partial(t1, tot_len, 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    tot_len, IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;
	security_skb_classify_flow(skb, &fl);

	/* Pass a socket to ip6_dst_lookup even when it is for a RST;
	 * the underlying function uses it to retrieve the network
	 * namespace.
	 */
	if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) {
		if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) {
			skb_dst_set(buff, dst);
			ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
			if (rst)
				TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
			return;
		}
	}

	kfree_skb(buff);
}

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	if (sk)
		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1);
}

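/* The sequence numbers above follow RFC 793: if the offending segment
 * carried an ACK, the RST reuses that acknowledgment number as its own
 * sequence number; otherwise the RST acknowledges everything the segment
 * consumed (ack = SEG.SEQ + data length + SYN + FIN flags).  E.g. a stray
 * SYN with SEG.SEQ = 1000 and no data gets a RST with seq = 0 and
 * ack_seq = 1001.
 */
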
static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
			    struct tcp_md5sig_key *key)
{
	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
			req->ts_recent,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr));
}

static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
			&ipv6_hdr(skb)->saddr, th->source,
			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	u8 *hash_location;
	struct request_sock *req;
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 isn = TCP_SKB_CB(skb)->when;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0
#endif

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			syn_flood_warning(skb);
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies)
			want_cookie = 1;
		else
#endif
		goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *d;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_free;

		/* Secret recipe starts with IP addresses */
		d = &ipv6_hdr(skb)->daddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		d = &ipv6_hdr(skb)->saddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

#ifdef CONFIG_SYN_COOKIES
		want_cookie = 0;	/* not our kind of cookie */
#endif
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_free;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
	if (!want_cookie)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (want_cookie) {
		isn = cookie_v6_init_sequence(sk, skb, &req->mss);
		req->cookie_ts = tmp_opt.tstamp_ok;
	} else if (!isn) {
		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			treq->pktopts = skb;
		}
		treq->iif = sk->sk_bound_dev_if;

		/* So that link locals have meaning */
		if (!sk->sk_bound_dev_if &&
		    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
			treq->iif = inet6_iif(skb);

		isn = tcp_v6_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;

	security_inet_conn_request(sk, skb, req);

	if (tcp_v6_send_synack(sk, req,
			       (struct request_values *)&tmp_ext) ||
	    want_cookie)
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_free:
	reqsk_free(req);
drop:
	return 0; /* don't send reset */
}

static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	treq = inet6_rsk(req);
	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (dst == NULL) {
		struct in6_addr *final_p = NULL, final;
		struct flowi fl;

		memset(&fl, 0, sizeof(fl));
		fl.proto = IPPROTO_TCP;
		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}
		ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
		fl.oif = sk->sk_bound_dev_if;
		fl.mark = sk->sk_mark;
		fl.fl_ip_dport = inet_rsk(req)->rmt_port;
		fl.fl_ip_sport = inet_rsk(req)->loc_port;
		security_req_classify_flow(req, &fl);

		if (ip6_dst_lookup(sk, &dst, &fl))
			goto out;

		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		if ((xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->opt = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping a reference count would be much more clever,
	   but we do one more thing here: reattach the optmem
	   to the new socket.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v6_md5_do_add(newsk, &newnp->daddr,
					  newkey, key->keylen);
	}
#endif

	__inet6_hash(newsk, NULL);
	__inet_inherit_port(sk, newsk);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return NULL;
}

static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}

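/* Short packets (<= 76 bytes) are checksummed immediately; longer ones
 * keep the precomputed pseudo-header sum in skb->csum and are verified
 * lazily, e.g. while the payload is being copied to userspace.
 */
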
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but that is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   does not look very well thought out. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		TCP_CHECK_TIMER(sk);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	TCP_CHECK_TIMER(sk);
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

static int tcp_v6_rcv(struct sk_buff *skb)
{
	struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb));
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:

	/*
	 *	Discard frame
	 */

	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

static int tcp_v6_remember_stamp(struct sock *sk)
{
	/* Alas, not yet... */
	return 0;
}

static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.remember_stamp	   = tcp_v6_remember_stamp,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_add	=	tcp_v6_md5_add_func,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */

static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.remember_stamp	   = tcp_v4_remember_stamp,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_add	=	tcp_v6_md5_add_func,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = sysctl_tcp_reordering;

	sk->sk_state = TCP_CLOSE;

	icsk->icsk_af_ops = &ipv6_specific;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
	icsk->icsk_sync_mss = tcp_sync_mss;
	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

#ifdef CONFIG_TCP_MD5SIG
	tp->af_specific = &tcp_sock_ipv6_specific;
#endif

	/* TCP Cookie Transactions */
	if (sysctl_tcp_cookie_size > 0) {
		/* Default, cookies without s_data_payload. */
		tp->cookie_values =
			kzalloc(sizeof(*tp->cookie_values),
				sk->sk_allocation);
		if (tp->cookie_values != NULL)
			kref_init(&tp->cookie_values->kref);
	}
	/* Presumed zeroed, in order of appearance:
	 *	cookie_in_always, cookie_out_never,
	 *	s_data_constant, s_data_in, s_data_out
	 */
	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	local_bh_disable();
	percpu_counter_inc(&tcp_sockets_allocated);
	local_bh_enable();

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list */
	if (tcp_sk(sk)->md5sig_info)
		tcp_v6_clear_md5_list(sk);
#endif

	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 struct sock *sk, struct request_sock *req, int i, int uid)
{
	int ttd = req->expires - jiffies;
	struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_rsk(req)->loc_port),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0,0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   uid,
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	struct inet_sock *inet = inet_sk(sp);
	struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct ipv6_pinfo *np = inet6_sk(sp);

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
		   );
}

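/* Each /proc/net/tcp6 line prints the hex address/port pairs, socket
 * state, tx/rx queue sizes, the active timer and its expiry, retransmit
 * and probe counters, uid, inode, refcount, and congestion state (rto,
 * ato, cwnd, ssthresh), matching the header emitted by tcp6_seq_show()
 * below.
 */
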
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest = &tw6->tw_v6_daddr;
	src  = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait6_sock(seq, v, st->num);
		break;
	}
out:
	return 0;
}

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= {
		.owner		= THIS_MODULE,
	},
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif

struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.backlog_rcv		= tcp_v6_do_rcv,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};

static const struct inet6_protocol tcpv6_protocol = {
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.gso_send_check	=	tcp_v6_gso_send_check,
	.gso_segment	=	tcp_tso_segment,
	.gro_receive	=	tcp6_gro_receive,
	.gro_complete	=	tcp6_gro_complete,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.no_check	=	0,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};

int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}