Gitweb page header (unrelated commit title): Revert "PM QoS: Use spinlock in the per-device PM QoS constraints code"
Source file: [firefly-linux-kernel-4.4.55.git] / net / ipv6 / tcp_ipv6.c
1 /*
2  *      TCP over IPv6
3  *      Linux INET6 implementation
4  *
5  *      Authors:
6  *      Pedro Roque             <roque@di.fc.ul.pt>
7  *
8  *      Based on:
9  *      linux/net/ipv4/tcp.c
10  *      linux/net/ipv4/tcp_input.c
11  *      linux/net/ipv4/tcp_output.c
12  *
13  *      Fixes:
14  *      Hideaki YOSHIFUJI       :       sin6_scope_id support
15  *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
16  *      Alexey Kuznetsov                allow both IPv4 and IPv6 sockets to bind
17  *                                      a single port at the same time.
18  *      YOSHIFUJI Hideaki @USAGI:       convert /proc/net/tcp6 to seq_file.
19  *
20  *      This program is free software; you can redistribute it and/or
21  *      modify it under the terms of the GNU General Public License
22  *      as published by the Free Software Foundation; either version
23  *      2 of the License, or (at your option) any later version.
24  */
25
26 #include <linux/bottom_half.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/jiffies.h>
34 #include <linux/in.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/init.h>
38 #include <linux/jhash.h>
39 #include <linux/ipsec.h>
40 #include <linux/times.h>
41 #include <linux/slab.h>
42
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/snmp.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/netdma.h>
63 #include <net/inet_common.h>
64 #include <net/secure_seq.h>
65 #include <net/tcp_memcontrol.h>
66
67 #include <asm/uaccess.h>
68
69 #include <linux/proc_fs.h>
70 #include <linux/seq_file.h>
71
72 #include <linux/crypto.h>
73 #include <linux/scatterlist.h>
74
75 static void     tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
76 static void     tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
77                                       struct request_sock *req);
78
79 static int      tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
80 static void     __tcp_v6_send_check(struct sk_buff *skb,
81                                     const struct in6_addr *saddr,
82                                     const struct in6_addr *daddr);
83
84 static const struct inet_connection_sock_af_ops ipv6_mapped;
85 static const struct inet_connection_sock_af_ops ipv6_specific;
86 #ifdef CONFIG_TCP_MD5SIG
87 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
88 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
89 #else
/* !CONFIG_TCP_MD5SIG stub: no MD5 keys can exist, so lookups always miss. */
90 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
91                                                    const struct in6_addr *addr)
92 {
93         return NULL;
94 }
95 #endif
96
/* Cache the inbound route of @skb on @sk for RX early demux, remembering
 * the ingress ifindex and the fib6 node serial number so a later
 * __sk_dst_check() can detect a stale route.
 */
97 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
98 {
99         struct dst_entry *dst = skb_dst(skb);
100         const struct rt6_info *rt = (const struct rt6_info *)dst;
101
        /* Take our own reference; sk->sk_rx_dst owns it until replaced. */
102         dst_hold(dst);
103         sk->sk_rx_dst = dst;
104         inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
        /* NOTE(review): rt6i_node can be NULL for some (e.g. cached) routes,
         * in which case no validation cookie is stored — confirm users of
         * rx_dst_cookie tolerate that.
         */
105         if (rt->rt6i_node)
106                 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
107 }
108
109 static void tcp_v6_hash(struct sock *sk)
110 {
111         if (sk->sk_state != TCP_CLOSE) {
112                 if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
113                         tcp_prot.hash(sk);
114                         return;
115                 }
116                 local_bh_disable();
117                 __inet6_hash(sk, NULL);
118                 local_bh_enable();
119         }
120 }
121
/* Fold the IPv6 pseudo-header into @base (the partial checksum of the
 * TCP header + payload) and return the final TCP checksum for a segment
 * of @len bytes from @saddr to @daddr.
 */
122 static __inline__ __sum16 tcp_v6_check(int len,
123                                    const struct in6_addr *saddr,
124                                    const struct in6_addr *daddr,
125                                    __wsum base)
126 {
127         return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
128 }
129
/* Derive a secure initial sequence number for a passive connection from
 * the 4-tuple of the incoming SYN (addresses swapped: the ISN is for our
 * reply direction).
 */
130 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
131 {
132         return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
133                                             ipv6_hdr(skb)->saddr.s6_addr32,
134                                             tcp_hdr(skb)->dest,
135                                             tcp_hdr(skb)->source);
136 }
137
/* Active open: connect an IPv6 TCP socket to @uaddr.
 *
 * Validates the sockaddr, resolves flow labels and link-local scope ids,
 * short-circuits v4-mapped destinations to tcp_v4_connect(), otherwise
 * performs the IPv6 route lookup, binds a source address, picks a port
 * via inet6_hash_connect() and sends the SYN.  Returns 0 or a negative
 * errno; on late failure the socket is moved back to TCP_CLOSE.
 */
138 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
139                           int addr_len)
140 {
141         struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
142         struct inet_sock *inet = inet_sk(sk);
143         struct inet_connection_sock *icsk = inet_csk(sk);
144         struct ipv6_pinfo *np = inet6_sk(sk);
145         struct tcp_sock *tp = tcp_sk(sk);
146         struct in6_addr *saddr = NULL, *final_p, final;
147         struct rt6_info *rt;
148         struct flowi6 fl6;
149         struct dst_entry *dst;
150         int addr_type;
151         int err;
152
153         if (addr_len < SIN6_LEN_RFC2133)
154                 return -EINVAL;
155
156         if (usin->sin6_family != AF_INET6)
157                 return -EAFNOSUPPORT;
158
159         memset(&fl6, 0, sizeof(fl6));
160
        /* Honour a caller-supplied flow label (IPV6_FLOWINFO) when the
         * socket has sndflow enabled; a managed label must exist and may
         * override the destination address.
         */
161         if (np->sndflow) {
162                 fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
163                 IP6_ECN_flow_init(fl6.flowlabel);
164                 if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
165                         struct ip6_flowlabel *flowlabel;
166                         flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
167                         if (flowlabel == NULL)
168                                 return -EINVAL;
169                         usin->sin6_addr = flowlabel->dst;
170                         fl6_sock_release(flowlabel);
171                 }
172         }
173
174         /*
175          *      connect() to INADDR_ANY means loopback (BSD'ism).
176          */
177
178         if(ipv6_addr_any(&usin->sin6_addr))
179                 usin->sin6_addr.s6_addr[15] = 0x1;
180
181         addr_type = ipv6_addr_type(&usin->sin6_addr);
182
183         if(addr_type & IPV6_ADDR_MULTICAST)
184                 return -ENETUNREACH;
185
186         if (addr_type&IPV6_ADDR_LINKLOCAL) {
187                 if (addr_len >= sizeof(struct sockaddr_in6) &&
188                     usin->sin6_scope_id) {
189                         /* If interface is set while binding, indices
190                          * must coincide.
191                          */
192                         if (sk->sk_bound_dev_if &&
193                             sk->sk_bound_dev_if != usin->sin6_scope_id)
194                                 return -EINVAL;
195
196                         sk->sk_bound_dev_if = usin->sin6_scope_id;
197                 }
198
199                 /* Connect to link-local address requires an interface */
200                 if (!sk->sk_bound_dev_if)
201                         return -EINVAL;
202         }
203
        /* Reconnecting to a different peer: discard stale timestamp state
         * and the old write sequence.
         */
204         if (tp->rx_opt.ts_recent_stamp &&
205             !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
206                 tp->rx_opt.ts_recent = 0;
207                 tp->rx_opt.ts_recent_stamp = 0;
208                 tp->write_seq = 0;
209         }
210
211         np->daddr = usin->sin6_addr;
212         np->flow_label = fl6.flowlabel;
213
214         /*
215          *      TCP over IPv4
216          */
217
218         if (addr_type == IPV6_ADDR_MAPPED) {
219                 u32 exthdrlen = icsk->icsk_ext_hdr_len;
220                 struct sockaddr_in sin;
221
222                 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
223
224                 if (__ipv6_only_sock(sk))
225                         return -ENETUNREACH;
226
227                 sin.sin_family = AF_INET;
228                 sin.sin_port = usin->sin6_port;
229                 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
230
                /* Switch the socket to the v4-mapped operations before
                 * delegating to the IPv4 connect path; undo on failure.
                 */
231                 icsk->icsk_af_ops = &ipv6_mapped;
232                 sk->sk_backlog_rcv = tcp_v4_do_rcv;
233 #ifdef CONFIG_TCP_MD5SIG
234                 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
235 #endif
236
237                 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
238
239                 if (err) {
240                         icsk->icsk_ext_hdr_len = exthdrlen;
241                         icsk->icsk_af_ops = &ipv6_specific;
242                         sk->sk_backlog_rcv = tcp_v6_do_rcv;
243 #ifdef CONFIG_TCP_MD5SIG
244                         tp->af_specific = &tcp_sock_ipv6_specific;
245 #endif
246                         goto failure;
247                 } else {
248                         ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
249                         ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
250                                                &np->rcv_saddr);
251                 }
252
253                 return err;
254         }
255
256         if (!ipv6_addr_any(&np->rcv_saddr))
257                 saddr = &np->rcv_saddr;
258
        /* Build the flow description and look up the route. */
259         fl6.flowi6_proto = IPPROTO_TCP;
260         fl6.daddr = np->daddr;
261         fl6.saddr = saddr ? *saddr : np->saddr;
262         fl6.flowi6_oif = sk->sk_bound_dev_if;
263         fl6.flowi6_mark = sk->sk_mark;
264         fl6.fl6_dport = usin->sin6_port;
265         fl6.fl6_sport = inet->inet_sport;
266
267         final_p = fl6_update_dst(&fl6, np->opt, &final);
268
269         security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
270
271         dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
272         if (IS_ERR(dst)) {
273                 err = PTR_ERR(dst);
274                 goto failure;
275         }
276
        /* No bound source address: adopt the one chosen by routing. */
277         if (saddr == NULL) {
278                 saddr = &fl6.saddr;
279                 np->rcv_saddr = *saddr;
280         }
281
282         /* set the source address */
283         np->saddr = *saddr;
284         inet->inet_rcv_saddr = LOOPBACK4_IPV6;
285
286         sk->sk_gso_type = SKB_GSO_TCPV6;
287         __ip6_dst_store(sk, dst, NULL, NULL);
288
        /* TIME-WAIT recycling: reuse the peer's cached timestamp so a
         * fresh connection to the same destination is accepted quickly.
         */
289         rt = (struct rt6_info *) dst;
290         if (tcp_death_row.sysctl_tw_recycle &&
291             !tp->rx_opt.ts_recent_stamp &&
292             ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr))
293                 tcp_fetch_timewait_stamp(sk, dst);
294
295         icsk->icsk_ext_hdr_len = 0;
296         if (np->opt)
297                 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
298                                           np->opt->opt_nflen);
299
300         tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
301
302         inet->inet_dport = usin->sin6_port;
303
        /* Pick a local port / hash the socket, then send the SYN. */
304         tcp_set_state(sk, TCP_SYN_SENT);
305         err = inet6_hash_connect(&tcp_death_row, sk);
306         if (err)
307                 goto late_failure;
308
309         if (!tp->write_seq)
310                 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
311                                                              np->daddr.s6_addr32,
312                                                              inet->inet_sport,
313                                                              inet->inet_dport);
314
315         err = tcp_connect(sk);
316         if (err)
317                 goto late_failure;
318
319         return 0;
320
321 late_failure:
322         tcp_set_state(sk, TCP_CLOSE);
323         __sk_dst_reset(sk);
324 failure:
325         inet->inet_dport = 0;
326         sk->sk_route_caps = 0;
327         return err;
328 }
329
330 static void tcp_v6_mtu_reduced(struct sock *sk)
331 {
332         struct dst_entry *dst;
333
334         if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
335                 return;
336
337         dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
338         if (!dst)
339                 return;
340
341         if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
342                 tcp_sync_mss(sk, dst_mtu(dst));
343                 tcp_simple_retransmit(sk);
344         }
345 }
346
347 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
348                 u8 type, u8 code, int offset, __be32 info)
349 {
350         const struct ipv6hdr *hdr = (const struct ipv6hdr*)skb->data;
351         const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
352         struct ipv6_pinfo *np;
353         struct sock *sk;
354         int err;
355         struct tcp_sock *tp;
356         __u32 seq;
357         struct net *net = dev_net(skb->dev);
358
359         sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
360                         th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
361
362         if (sk == NULL) {
363                 ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
364                                    ICMP6_MIB_INERRORS);
365                 return;
366         }
367
368         if (sk->sk_state == TCP_TIME_WAIT) {
369                 inet_twsk_put(inet_twsk(sk));
370                 return;
371         }
372
373         bh_lock_sock(sk);
374         if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
375                 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
376
377         if (sk->sk_state == TCP_CLOSE)
378                 goto out;
379
380         if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
381                 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
382                 goto out;
383         }
384
385         tp = tcp_sk(sk);
386         seq = ntohl(th->seq);
387         if (sk->sk_state != TCP_LISTEN &&
388             !between(seq, tp->snd_una, tp->snd_nxt)) {
389                 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
390                 goto out;
391         }
392
393         np = inet6_sk(sk);
394
395         if (type == NDISC_REDIRECT) {
396                 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
397
398                 if (dst)
399                         dst->ops->redirect(dst, sk, skb);
400         }
401
402         if (type == ICMPV6_PKT_TOOBIG) {
403                 tp->mtu_info = ntohl(info);
404                 if (!sock_owned_by_user(sk))
405                         tcp_v6_mtu_reduced(sk);
406                 else
407                         set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags);
408                 goto out;
409         }
410
411         icmpv6_err_convert(type, code, &err);
412
413         /* Might be for an request_sock */
414         switch (sk->sk_state) {
415                 struct request_sock *req, **prev;
416         case TCP_LISTEN:
417                 if (sock_owned_by_user(sk))
418                         goto out;
419
420                 req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
421                                            &hdr->saddr, inet6_iif(skb));
422                 if (!req)
423                         goto out;
424
425                 /* ICMPs are not backlogged, hence we cannot get
426                  * an established socket here.
427                  */
428                 WARN_ON(req->sk != NULL);
429
430                 if (seq != tcp_rsk(req)->snt_isn) {
431                         NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
432                         goto out;
433                 }
434
435                 inet_csk_reqsk_queue_drop(sk, req, prev);
436                 goto out;
437
438         case TCP_SYN_SENT:
439         case TCP_SYN_RECV:  /* Cannot happen.
440                                It can, it SYNs are crossed. --ANK */
441                 if (!sock_owned_by_user(sk)) {
442                         sk->sk_err = err;
443                         sk->sk_error_report(sk);                /* Wake people up to see the error (see connect in sock.c) */
444
445                         tcp_done(sk);
446                 } else
447                         sk->sk_err_soft = err;
448                 goto out;
449         }
450
451         if (!sock_owned_by_user(sk) && np->recverr) {
452                 sk->sk_err = err;
453                 sk->sk_error_report(sk);
454         } else
455                 sk->sk_err_soft = err;
456
457 out:
458         bh_unlock_sock(sk);
459         sock_put(sk);
460 }
461
462
/* Transmit a SYN-ACK for the embryonic connection @req.
 * Grabs a route when @dst was not supplied by the caller, builds the
 * SYN-ACK skb, checksums it over the request's address pair and sends it
 * via ip6_xmit() on @queue_mapping.  Returns a net_xmit code, -ENOMEM on
 * allocation failure, or -ENOMEM if no route could be found.
 */
463 static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
464                               struct flowi6 *fl6,
465                               struct request_sock *req,
466                               struct request_values *rvp,
467                               u16 queue_mapping)
468 {
469         struct inet6_request_sock *treq = inet6_rsk(req);
470         struct ipv6_pinfo *np = inet6_sk(sk);
471         struct sk_buff * skb;
472         int err = -ENOMEM;
473
474         /* First, grab a route. */
475         if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
476                 goto done;
477
478         skb = tcp_make_synack(sk, dst, req, rvp);
479
480         if (skb) {
481                 __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
482
483                 fl6->daddr = treq->rmt_addr;
484                 skb_set_queue_mapping(skb, queue_mapping);
485                 err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
486                 err = net_xmit_eval(err);
487         }
488
489 done:
490         return err;
491 }
492
/* SYN-ACK retransmit hook for tcp6_request_sock_ops: account the
 * retransmission and resend with a fresh route lookup.
 */
493 static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
494                              struct request_values *rvp)
495 {
496         struct flowi6 fl6;
497
498         TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
499         return tcp_v6_send_synack(sk, NULL, &fl6, req, rvp, 0);
500 }
501
/* Free the pktoptions skb cached on a request sock (kfree_skb(NULL) is
 * a no-op).
 */
502 static void tcp_v6_reqsk_destructor(struct request_sock *req)
503 {
504         kfree_skb(inet6_rsk(req)->pktopts);
505 }
506
507 #ifdef CONFIG_TCP_MD5SIG
/* Find the MD5 key configured on @sk for IPv6 peer @addr, or NULL. */
508 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
509                                                    const struct in6_addr *addr)
510 {
511         return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
512 }
513
/* MD5 key lookup keyed by another socket's destination address. */
514 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
515                                                 struct sock *addr_sk)
516 {
517         return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
518 }
519
/* MD5 key lookup keyed by a request sock's remote address. */
520 static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
521                                                       struct request_sock *req)
522 {
523         return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
524 }
525
/* setsockopt(TCP_MD5SIG) handler: add or (when tcpm_keylen == 0) delete
 * the MD5 key for the peer in @optval.  V4-mapped IPv6 addresses are
 * stored under AF_INET so they match v4 traffic.  Returns 0 or a
 * negative errno (-EINVAL on malformed input, -EFAULT on bad copy).
 */
526 static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
527                                   int optlen)
528 {
529         struct tcp_md5sig cmd;
530         struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
531
532         if (optlen < sizeof(cmd))
533                 return -EINVAL;
534
535         if (copy_from_user(&cmd, optval, sizeof(cmd)))
536                 return -EFAULT;
537
538         if (sin6->sin6_family != AF_INET6)
539                 return -EINVAL;
540
        /* Zero key length means "delete the key for this peer". */
541         if (!cmd.tcpm_keylen) {
542                 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
543                         return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
544                                               AF_INET);
545                 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
546                                       AF_INET6);
547         }
548
549         if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
550                 return -EINVAL;
551
552         if (ipv6_addr_v4mapped(&sin6->sin6_addr))
553                 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
554                                       AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
555
556         return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
557                               AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
558 }
559
/* Feed the RFC 2460-style TCP/IPv6 pseudo-header (saddr, daddr, proto,
 * TCP length @nbytes) into the MD5 digest held in @hp.  Returns the
 * crypto_hash_update() result (0 on success).
 */
560 static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
561                                         const struct in6_addr *daddr,
562                                         const struct in6_addr *saddr, int nbytes)
563 {
564         struct tcp6_pseudohdr *bp;
565         struct scatterlist sg;
566
567         bp = &hp->md5_blk.ip6;
568         /* 1. TCP pseudo-header (RFC2460) */
569         bp->saddr = *saddr;
570         bp->daddr = *daddr;
571         bp->protocol = cpu_to_be32(IPPROTO_TCP);
572         bp->len = cpu_to_be32(nbytes);
573
574         sg_init_one(&sg, bp, sizeof(*bp));
575         return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
576 }
577
/* Compute the RFC 2385 MD5 signature over pseudo-header + TCP header +
 * key (no payload) into @md5_hash (16 bytes).  Used for control segments
 * built in tcp_v6_send_response().  Returns 0 on success; on any crypto
 * failure the hash is zeroed and 1 is returned.
 */
578 static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
579                                const struct in6_addr *daddr, struct in6_addr *saddr,
580                                const struct tcphdr *th)
581 {
582         struct tcp_md5sig_pool *hp;
583         struct hash_desc *desc;
584
585         hp = tcp_get_md5sig_pool();
586         if (!hp)
587                 goto clear_hash_noput;
588         desc = &hp->md5_desc;
589
590         if (crypto_hash_init(desc))
591                 goto clear_hash;
        /* th->doff << 2 == header length in bytes: header-only digest. */
592         if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
593                 goto clear_hash;
594         if (tcp_md5_hash_header(hp, th))
595                 goto clear_hash;
596         if (tcp_md5_hash_key(hp, key))
597                 goto clear_hash;
598         if (crypto_hash_final(desc, md5_hash))
599                 goto clear_hash;
600
601         tcp_put_md5sig_pool();
602         return 0;
603
604 clear_hash:
605         tcp_put_md5sig_pool();
606 clear_hash_noput:
607         memset(md5_hash, 0, 16);
608         return 1;
609 }
610
/* Compute the RFC 2385 MD5 signature over a full segment (pseudo-header,
 * TCP header, payload, key) into @md5_hash.  The address pair comes from
 * @sk, else @req, else the skb's own IPv6 header.  Returns 0 on success;
 * on any crypto failure the hash is zeroed and 1 is returned.
 */
611 static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
612                                const struct sock *sk,
613                                const struct request_sock *req,
614                                const struct sk_buff *skb)
615 {
616         const struct in6_addr *saddr, *daddr;
617         struct tcp_md5sig_pool *hp;
618         struct hash_desc *desc;
619         const struct tcphdr *th = tcp_hdr(skb);
620
        /* Pick the address pair: established socket > request sock >
         * raw packet headers (inbound verification path).
         */
621         if (sk) {
622                 saddr = &inet6_sk(sk)->saddr;
623                 daddr = &inet6_sk(sk)->daddr;
624         } else if (req) {
625                 saddr = &inet6_rsk(req)->loc_addr;
626                 daddr = &inet6_rsk(req)->rmt_addr;
627         } else {
628                 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
629                 saddr = &ip6h->saddr;
630                 daddr = &ip6h->daddr;
631         }
632
633         hp = tcp_get_md5sig_pool();
634         if (!hp)
635                 goto clear_hash_noput;
636         desc = &hp->md5_desc;
637
638         if (crypto_hash_init(desc))
639                 goto clear_hash;
640
641         if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
642                 goto clear_hash;
643         if (tcp_md5_hash_header(hp, th))
644                 goto clear_hash;
        /* Digest the payload starting after the TCP header. */
645         if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
646                 goto clear_hash;
647         if (tcp_md5_hash_key(hp, key))
648                 goto clear_hash;
649         if (crypto_hash_final(desc, md5_hash))
650                 goto clear_hash;
651
652         tcp_put_md5sig_pool();
653         return 0;
654
655 clear_hash:
656         tcp_put_md5sig_pool();
657 clear_hash_noput:
658         memset(md5_hash, 0, 16);
659         return 1;
660 }
661
/* Verify the TCP MD5 signature of an inbound segment against the key
 * configured for the sender.  Returns 0 when the segment is acceptable
 * (no key and no option, or signature matches) and 1 when it must be
 * dropped (key/option mismatch or bad signature), per RFC 2385.
 */
662 static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
663 {
664         const __u8 *hash_location = NULL;
665         struct tcp_md5sig_key *hash_expected;
666         const struct ipv6hdr *ip6h = ipv6_hdr(skb);
667         const struct tcphdr *th = tcp_hdr(skb);
668         int genhash;
669         u8 newhash[16];
670
671         hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
672         hash_location = tcp_parse_md5sig_option(th);
673
674         /* We've parsed the options - do we have a hash? */
675         if (!hash_expected && !hash_location)
676                 return 0;
677
678         if (hash_expected && !hash_location) {
679                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
680                 return 1;
681         }
682
683         if (!hash_expected && hash_location) {
684                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
685                 return 1;
686         }
687
688         /* check the signature */
689         genhash = tcp_v6_md5_hash_skb(newhash,
690                                       hash_expected,
691                                       NULL, NULL, skb);
692
693         if (genhash || memcmp(hash_location, newhash, 16) != 0) {
694                 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
695                                      genhash ? "failed" : "mismatch",
696                                      &ip6h->saddr, ntohs(th->source),
697                                      &ip6h->daddr, ntohs(th->dest));
698                 return 1;
699         }
700         return 0;
701 }
702 #endif
703
/* Request-sock operations for IPv6 TCP: SYN-ACK (re)transmission,
 * ACK/RST generation and teardown for embryonic connections.
 */
704 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
705         .family         =       AF_INET6,
706         .obj_size       =       sizeof(struct tcp6_request_sock),
707         .rtx_syn_ack    =       tcp_v6_rtx_synack,
708         .send_ack       =       tcp_v6_reqsk_send_ack,
709         .destructor     =       tcp_v6_reqsk_destructor,
710         .send_reset     =       tcp_v6_send_reset,
711         .syn_ack_timeout =      tcp_syn_ack_timeout,
712 };
713
714 #ifdef CONFIG_TCP_MD5SIG
/* MD5 hooks used while the connection is still a request sock. */
715 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
716         .md5_lookup     =       tcp_v6_reqsk_md5_lookup,
717         .calc_md5_hash  =       tcp_v6_md5_hash_skb,
718 };
719 #endif
720
/* Fill in the TCP checksum for an outgoing skb over @saddr/@daddr.
 * With CHECKSUM_PARTIAL the pseudo-header sum is stored and the device
 * (or checksum helper) completes it; otherwise compute it in full here.
 */
721 static void __tcp_v6_send_check(struct sk_buff *skb,
722                                 const struct in6_addr *saddr, const struct in6_addr *daddr)
723 {
724         struct tcphdr *th = tcp_hdr(skb);
725
726         if (skb->ip_summed == CHECKSUM_PARTIAL) {
727                 th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
728                 skb->csum_start = skb_transport_header(skb) - skb->head;
729                 skb->csum_offset = offsetof(struct tcphdr, check);
730         } else {
731                 th->check = tcp_v6_check(skb->len, saddr, daddr,
732                                          csum_partial(th, th->doff << 2,
733                                                       skb->csum));
734         }
735 }
736
/* Checksum an outgoing segment using the socket's own address pair. */
737 static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
738 {
739         struct ipv6_pinfo *np = inet6_sk(sk);
740
741         __tcp_v6_send_check(skb, &np->saddr, &np->daddr);
742 }
743
/* GSO hook: prepare the TCP checksum fields of a segment that will be
 * software-segmented.  Returns 0, or -EINVAL when the TCP header is not
 * available in the linear area.
 */
744 static int tcp_v6_gso_send_check(struct sk_buff *skb)
745 {
746         const struct ipv6hdr *ipv6h;
747         struct tcphdr *th;
748
749         if (!pskb_may_pull(skb, sizeof(*th)))
750                 return -EINVAL;
751
752         ipv6h = ipv6_hdr(skb);
753         th = tcp_hdr(skb);
754
755         th->check = 0;
756         skb->ip_summed = CHECKSUM_PARTIAL;
757         __tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
758         return 0;
759 }
760
/* GRO receive hook: validate the hardware checksum over the gro span
 * before handing the segment to the generic TCP GRO engine.  Segments
 * with no usable checksum are flushed (not aggregated).
 */
761 static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
762                                          struct sk_buff *skb)
763 {
764         const struct ipv6hdr *iph = skb_gro_network_header(skb);
765
766         switch (skb->ip_summed) {
767         case CHECKSUM_COMPLETE:
768                 if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
769                                   skb->csum)) {
770                         skb->ip_summed = CHECKSUM_UNNECESSARY;
771                         break;
772                 }
773
774                 /* fall through */
775         case CHECKSUM_NONE:
776                 NAPI_GRO_CB(skb)->flush = 1;
777                 return NULL;
778         }
779
780         return tcp_gro_receive(head, skb);
781 }
782
/* GRO complete hook: restore a partial pseudo-header checksum and mark
 * the merged skb as TCPv6 GSO before it continues up the stack.
 */
783 static int tcp6_gro_complete(struct sk_buff *skb)
784 {
785         const struct ipv6hdr *iph = ipv6_hdr(skb);
786         struct tcphdr *th = tcp_hdr(skb);
787
788         th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
789                                   &iph->saddr, &iph->daddr, 0);
790         skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
791
792         return tcp_gro_complete(skb);
793 }
794
/* Build and transmit an unconnected control segment (RST when @rst, else
 * a bare ACK) in reply to @skb, using the per-netns IPv6 TCP control
 * socket.  @ts != 0 appends a timestamp option; @key appends an RFC 2385
 * MD5 option.  Source/destination are @skb's swapped, so the reply goes
 * back where the packet came from.
 */
795 static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
796                                  u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass)
797 {
798         const struct tcphdr *th = tcp_hdr(skb);
799         struct tcphdr *t1;
800         struct sk_buff *buff;
801         struct flowi6 fl6;
802         struct net *net = dev_net(skb_dst(skb)->dev);
803         struct sock *ctl_sk = net->ipv6.tcp_sk;
804         unsigned int tot_len = sizeof(struct tcphdr);
805         struct dst_entry *dst;
806         __be32 *topt;
807
        /* Account optional TCP options in the header length up front. */
808         if (ts)
809                 tot_len += TCPOLEN_TSTAMP_ALIGNED;
810 #ifdef CONFIG_TCP_MD5SIG
811         if (key)
812                 tot_len += TCPOLEN_MD5SIG_ALIGNED;
813 #endif
814
815         buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
816                          GFP_ATOMIC);
817         if (buff == NULL)
818                 return;
819
820         skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
821
822         t1 = (struct tcphdr *) skb_push(buff, tot_len);
823         skb_reset_transport_header(buff);
824
825         /* Swap the send and the receive. */
826         memset(t1, 0, sizeof(*t1));
827         t1->dest = th->source;
828         t1->source = th->dest;
829         t1->doff = tot_len / 4;
830         t1->seq = htonl(seq);
831         t1->ack_seq = htonl(ack);
        /* A RST in reply to an ACK-less segment carries an ACK itself. */
832         t1->ack = !rst || !th->ack;
833         t1->rst = rst;
834         t1->window = htons(win);
835
836         topt = (__be32 *)(t1 + 1);
837
838         if (ts) {
839                 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
840                                 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
841                 *topt++ = htonl(tcp_time_stamp);
842                 *topt++ = htonl(ts);
843         }
844
845 #ifdef CONFIG_TCP_MD5SIG
846         if (key) {
847                 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
848                                 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
849                 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
850                                     &ipv6_hdr(skb)->saddr,
851                                     &ipv6_hdr(skb)->daddr, t1);
852         }
853 #endif
854
        /* Route the reply back to the sender of the original packet. */
855         memset(&fl6, 0, sizeof(fl6));
856         fl6.daddr = ipv6_hdr(skb)->saddr;
857         fl6.saddr = ipv6_hdr(skb)->daddr;
858
859         buff->ip_summed = CHECKSUM_PARTIAL;
860         buff->csum = 0;
861
862         __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
863
864         fl6.flowi6_proto = IPPROTO_TCP;
865         fl6.flowi6_oif = inet6_iif(skb);
866         fl6.fl6_dport = t1->dest;
867         fl6.fl6_sport = t1->source;
868         security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
869
870         /* Pass a socket to ip6_dst_lookup either it is for RST
871          * Underlying function will use this to retrieve the network
872          * namespace
873          */
874         dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
875         if (!IS_ERR(dst)) {
876                 skb_dst_set(buff, dst);
877                 ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
878                 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
879                 if (rst)
880                         TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
881                 return;
882         }
883
        /* No route: drop the reply we just built. */
884         kfree_skb(buff);
885 }
886
/* Send a RST in answer to a segment that has no acceptable socket/state.
 * Per RFC 793: never RST a RST; if the offending segment had an ACK, our
 * RST's SEQ echoes that ACK, otherwise we ACK everything the peer sent.
 * @sk may be NULL (no matching socket found by the caller).
 */
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
        const struct tcphdr *th = tcp_hdr(skb);
        u32 seq = 0, ack_seq = 0;
        struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
        const __u8 *hash_location = NULL;
        struct ipv6hdr *ipv6h = ipv6_hdr(skb);
        unsigned char newhash[16];
        int genhash;
        struct sock *sk1 = NULL;
#endif

        /* RFC 793: a RST must never be generated in response to a RST. */
        if (th->rst)
                return;

        /* Only respond to unicast destinations; never RST multicast. */
        if (!ipv6_unicast_destination(skb))
                return;

#ifdef CONFIG_TCP_MD5SIG
        hash_location = tcp_parse_md5sig_option(th);
        if (!sk && hash_location) {
                /*
                 * active side is lost. Try to find listening socket through
                 * source port, and then find md5 key through listening socket.
                 * we are not loose security here:
                 * Incoming packet is checked with md5 hash with finding key,
                 * no RST generated if md5 hash doesn't match.
                 */
                sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
                                           &tcp_hashinfo, &ipv6h->daddr,
                                           ntohs(th->source), inet6_iif(skb));
                if (!sk1)
                        return;

                /* Key lookup is RCU-protected; unlocked at release_sk1. */
                rcu_read_lock();
                key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
                if (!key)
                        goto release_sk1;

                /* Recompute the hash over the segment; on mismatch we stay
                 * silent rather than confirm the connection's existence.
                 */
                genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
                if (genhash || memcmp(hash_location, newhash, 16) != 0)
                        goto release_sk1;
        } else {
                key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
        }
#endif

        /* Choose SEQ/ACK for the RST per RFC 793 reset generation rules. */
        if (th->ack)
                seq = ntohl(th->ack_seq);
        else
                ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
                          (th->doff << 2);

        tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
        /* Drop the RCU read lock and the listener reference taken above;
         * sk1 is only non-NULL on the "active side lost" path.
         */
        if (sk1) {
                rcu_read_unlock();
                sock_put(sk1);
        }
#endif
}
951
/* Send a bare ACK (not a RST) carrying the given SEQ/ACK numbers, window,
 * optional timestamp echo (@ts), optional MD5 key and traffic class.
 * Thin wrapper around tcp_v6_send_response() with rst=0.
 */
static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
                            struct tcp_md5sig_key *key, u8 tclass)
{
        tcp_v6_send_response(skb, seq, ack, win, ts, key, 0, tclass);
}
957
/* Answer a segment that hit a TIME-WAIT socket by re-ACKing the state
 * preserved in the timewait sock (snd_nxt/rcv_nxt/window/ts_recent).
 * Consumes the reference on the timewait socket held by the caller.
 */
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
        struct inet_timewait_sock *tw = inet_twsk(sk);
        struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

        /* Receive window is de-scaled with the stored window scale. */
        tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
                        tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
                        tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
                        tw->tw_tclass);

        inet_twsk_put(tw);
}
970
/* Re-ACK on behalf of a connection request still in the SYN queue:
 * SEQ is our ISN+1 (SYN-ACK consumed one), ACK covers the peer's ISN.
 * The MD5 key (if any) is looked up on the listener @sk by the skb's
 * destination address, i.e. our local address.
 */
static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
                                  struct request_sock *req)
{
        tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
                        tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
}
977
978
/* For a segment arriving on a listening socket, decide which socket
 * should actually handle it.  Returns:
 *   - the result of tcp_check_req() when it matches a pending request,
 *   - an established child (returned with its bh lock held) when the
 *     handshake already completed on another CPU,
 *   - NULL when it matched a TIME-WAIT socket (reference dropped here),
 *   - otherwise @sk itself, possibly replaced by a syncookie child.
 */
static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
{
        struct request_sock *req, **prev;
        const struct tcphdr *th = tcp_hdr(skb);
        struct sock *nsk;

        /* Find possible connection requests. */
        req = inet6_csk_search_req(sk, &prev, th->source,
                                   &ipv6_hdr(skb)->saddr,
                                   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
        if (req)
                return tcp_check_req(sk, skb, req, prev);

        /* No pending request: the connection may already be established
         * (e.g. the final ACK raced in on another CPU).
         */
        nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
                        &ipv6_hdr(skb)->saddr, th->source,
                        &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

        if (nsk) {
                if (nsk->sk_state != TCP_TIME_WAIT) {
                        /* Caller expects the child locked for processing. */
                        bh_lock_sock(nsk);
                        return nsk;
                }
                inet_twsk_put(inet_twsk(nsk));
                return NULL;
        }

#ifdef CONFIG_SYN_COOKIES
        /* A non-SYN segment with no matching request may carry a valid
         * syncookie ACK; cookie_v6_check() can mint the child socket.
         */
        if (!th->syn)
                sk = cookie_v6_check(sk, skb);
#endif
        return sk;
}
1011
1012 /* FIXME: this is substantially similar to the ipv4 code.
1013  * Can some kind of merge be done? -- erics
1014  */
/* Handle an incoming SYN on a listening IPv6 socket: allocate and queue
 * a request_sock, pick an ISN (real, syncookie, or inherited from a
 * recycled timewait bucket via TCP_SKB_CB(skb)->when), and send the
 * SYN-ACK.  Always returns 0 -- on any failure the SYN is dropped
 * silently rather than reset.
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_extend_values tmp_ext;
        struct tcp_options_received tmp_opt;
        const u8 *hash_location;
        struct request_sock *req;
        struct inet6_request_sock *treq;
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        __u32 isn = TCP_SKB_CB(skb)->when;      /* non-zero: tw recycle path */
        struct dst_entry *dst = NULL;
        struct flowi6 fl6;
        bool want_cookie = false;

        /* v4-mapped traffic on a v6 socket goes to the IPv4 handler. */
        if (skb->protocol == htons(ETH_P_IP))
                return tcp_v4_conn_request(sk, skb);

        if (!ipv6_unicast_destination(skb))
                goto drop;

        /* SYN queue full: either fall back to syncookies or drop. */
        if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
                want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
                if (!want_cookie)
                        goto drop;
        }

        if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
                goto drop;

        req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
        if (req == NULL)
                goto drop;

#ifdef CONFIG_TCP_MD5SIG
        tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

        tcp_clear_options(&tmp_opt);
        /* Clamp MSS so a full segment fits in the IPv6 minimum MTU. */
        tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
        tmp_opt.user_mss = tp->rx_opt.user_mss;
        tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);

        /* Experimental TCP cookie-transactions (RFC 6013 era) handling:
         * mix both addresses and the initiator cookie into the bakery.
         */
        if (tmp_opt.cookie_plus > 0 &&
            tmp_opt.saw_tstamp &&
            !tp->rx_opt.cookie_out_never &&
            (sysctl_tcp_cookie_size > 0 ||
             (tp->cookie_values != NULL &&
              tp->cookie_values->cookie_desired > 0))) {
                u8 *c;
                u32 *d;
                u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
                int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

                if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
                        goto drop_and_free;

                /* Secret recipe starts with IP addresses */
                d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
                *mess++ ^= *d++;
                *mess++ ^= *d++;
                *mess++ ^= *d++;
                *mess++ ^= *d++;
                d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
                *mess++ ^= *d++;
                *mess++ ^= *d++;
                *mess++ ^= *d++;
                *mess++ ^= *d++;

                /* plus variable length Initiator Cookie */
                c = (u8 *)mess;
                while (l-- > 0)
                        *c++ ^= *hash_location++;

                want_cookie = false;    /* not our kind of cookie */
                tmp_ext.cookie_out_never = 0; /* false */
                tmp_ext.cookie_plus = tmp_opt.cookie_plus;
        } else if (!tp->rx_opt.cookie_in_always) {
                /* redundant indications, but ensure initialization. */
                tmp_ext.cookie_out_never = 1; /* true */
                tmp_ext.cookie_plus = 0;
        } else {
                goto drop_and_free;
        }
        tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

        /* Syncookies can only encode options if a timestamp was seen. */
        if (want_cookie && !tmp_opt.saw_tstamp)
                tcp_clear_options(&tmp_opt);

        tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
        tcp_openreq_init(req, &tmp_opt, skb);

        treq = inet6_rsk(req);
        treq->rmt_addr = ipv6_hdr(skb)->saddr;
        treq->loc_addr = ipv6_hdr(skb)->daddr;
        if (!want_cookie || tmp_opt.tstamp_ok)
                TCP_ECN_create_request(req, skb);

        treq->iif = sk->sk_bound_dev_if;

        /* So that link locals have meaning */
        if (!sk->sk_bound_dev_if &&
            ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
                treq->iif = inet6_iif(skb);

        if (!isn) {
                /* Latch the SYN's IPv6 options for IPV6_PKTOPTIONS if the
                 * listener asked for any of them; takes a ref on the skb.
                 */
                if (ipv6_opt_accepted(sk, skb) ||
                    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
                    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
                        atomic_inc(&skb->users);
                        treq->pktopts = skb;
                }

                if (want_cookie) {
                        isn = cookie_v6_init_sequence(sk, skb, &req->mss);
                        req->cookie_ts = tmp_opt.tstamp_ok;
                        goto have_isn;
                }

                /* VJ's idea. We save last timestamp seen
                 * from the destination in peer table, when entering
                 * state TIME-WAIT, and check against it before
                 * accepting new connection request.
                 *
                 * If "isn" is not zero, this request hit alive
                 * timewait bucket, so that all the necessary checks
                 * are made in the function processing timewait state.
                 */
                if (tmp_opt.saw_tstamp &&
                    tcp_death_row.sysctl_tw_recycle &&
                    (dst = inet6_csk_route_req(sk, &fl6, req)) != NULL) {
                        if (!tcp_peer_is_proven(req, dst, true)) {
                                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
                                goto drop_and_release;
                        }
                }
                /* Kill the following clause, if you dislike this way. */
                else if (!sysctl_tcp_syncookies &&
                         (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
                          (sysctl_max_syn_backlog >> 2)) &&
                         !tcp_peer_is_proven(req, dst, false)) {
                        /* Without syncookies last quarter of
                         * backlog is filled with destinations,
                         * proven to be alive.
                         * It means that we continue to communicate
                         * to destinations, already remembered
                         * to the moment of synflood.
                         */
                        LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
                                       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
                        goto drop_and_release;
                }

                isn = tcp_v6_init_sequence(skb);
        }
have_isn:
        tcp_rsk(req)->snt_isn = isn;
        tcp_rsk(req)->snt_synack = tcp_time_stamp;

        if (security_inet_conn_request(sk, skb, req))
                goto drop_and_release;

        /* On the syncookie path the request is deliberately not queued:
         * all state lives in the cookie, so free it after the SYN-ACK.
         */
        if (tcp_v6_send_synack(sk, dst, &fl6, req,
                               (struct request_values *)&tmp_ext,
                               skb_get_queue_mapping(skb)) ||
            want_cookie)
                goto drop_and_free;

        inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
        return 0;

drop_and_release:
        dst_release(dst);
drop_and_free:
        reqsk_free(req);
drop:
        return 0; /* don't send reset */
}
1192
/* Create the child socket once the 3-way handshake completes: clone the
 * listener, install addresses/options/route from the request, and hash
 * the child.  Handles both native IPv6 and v4-mapped connections (the
 * latter are built by the IPv4 code and then patched to v6 ops here).
 * Returns the new socket, or NULL on overflow/allocation/route failure.
 */
static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                                          struct request_sock *req,
                                          struct dst_entry *dst)
{
        struct inet6_request_sock *treq;
        struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
        struct tcp6_sock *newtcp6sk;
        struct inet_sock *newinet;
        struct tcp_sock *newtp;
        struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
        struct tcp_md5sig_key *key;
#endif
        struct flowi6 fl6;

        if (skb->protocol == htons(ETH_P_IP)) {
                /*
                 *      v6 mapped
                 */

                newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

                if (newsk == NULL)
                        return NULL;

                newtcp6sk = (struct tcp6_sock *)newsk;
                inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

                newinet = inet_sk(newsk);
                newnp = inet6_sk(newsk);
                newtp = tcp_sk(newsk);

                memcpy(newnp, np, sizeof(struct ipv6_pinfo));

                /* Present the IPv4 endpoints as ::ffff:a.b.c.d addresses. */
                ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);

                ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

                newnp->rcv_saddr = newnp->saddr;

                /* Route all further work through the IPv4 code paths. */
                inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
                newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
                newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

                newnp->ipv6_ac_list = NULL;
                newnp->ipv6_fl_list = NULL;
                newnp->pktoptions  = NULL;
                newnp->opt         = NULL;
                newnp->mcast_oif   = inet6_iif(skb);
                newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
                newnp->rcv_tclass  = ipv6_tclass(ipv6_hdr(skb));

                /*
                 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
                 * here, tcp_create_openreq_child now does this for us, see the comment in
                 * that function for the gory details. -acme
                 */

                /* It is tricky place. Until this moment IPv4 tcp
                   worked with IPv6 icsk.icsk_af_ops.
                   Sync it now.
                 */
                tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

                return newsk;
        }

        treq = inet6_rsk(req);

        if (sk_acceptq_is_full(sk))
                goto out_overflow;

        if (!dst) {
                dst = inet6_csk_route_req(sk, &fl6, req);
                if (!dst)
                        goto out;
        }

        newsk = tcp_create_openreq_child(sk, req, skb);
        if (newsk == NULL)
                goto out_nonewsk;

        /*
         * No need to charge this sock to the relevant IPv6 refcnt debug socks
         * count here, tcp_create_openreq_child now does this for us, see the
         * comment in that function for the gory details. -acme
         */

        newsk->sk_gso_type = SKB_GSO_TCPV6;
        __ip6_dst_store(newsk, dst, NULL, NULL);
        inet6_sk_rx_dst_set(newsk, skb);

        newtcp6sk = (struct tcp6_sock *)newsk;
        inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

        newtp = tcp_sk(newsk);
        newinet = inet_sk(newsk);
        newnp = inet6_sk(newsk);

        memcpy(newnp, np, sizeof(struct ipv6_pinfo));

        newnp->daddr = treq->rmt_addr;
        newnp->saddr = treq->loc_addr;
        newnp->rcv_saddr = treq->loc_addr;
        newsk->sk_bound_dev_if = treq->iif;

        /* Now IPv6 options...

           First: no IPv4 options.
         */
        newinet->inet_opt = NULL;
        newnp->ipv6_ac_list = NULL;
        newnp->ipv6_fl_list = NULL;

        /* Clone RX bits */
        newnp->rxopt.all = np->rxopt.all;

        /* Clone pktoptions received with SYN */
        newnp->pktoptions = NULL;
        if (treq->pktopts != NULL) {
                newnp->pktoptions = skb_clone(treq->pktopts,
                                              sk_gfp_atomic(sk, GFP_ATOMIC));
                consume_skb(treq->pktopts);
                treq->pktopts = NULL;
                if (newnp->pktoptions)
                        skb_set_owner_r(newnp->pktoptions, newsk);
        }
        newnp->opt        = NULL;
        newnp->mcast_oif  = inet6_iif(skb);
        newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
        newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));

        /* Clone native IPv6 options from listening socket (if any)

           Yes, keeping reference count would be much more clever,
           but we make one more one thing there: reattach optmem
           to newsk.
         */
        if (np->opt)
                newnp->opt = ipv6_dup_options(newsk, np->opt);

        inet_csk(newsk)->icsk_ext_hdr_len = 0;
        if (newnp->opt)
                inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
                                                     newnp->opt->opt_flen);

        tcp_mtup_init(newsk);
        tcp_sync_mss(newsk, dst_mtu(dst));
        newtp->advmss = dst_metric_advmss(dst);
        if (tcp_sk(sk)->rx_opt.user_mss &&
            tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
                newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

        tcp_initialize_rcv_mss(newsk);
        /* Seed the RTT estimator from the SYN/SYN-ACK exchange time. */
        if (tcp_rsk(req)->snt_synack)
                tcp_valid_rtt_meas(newsk,
                    tcp_time_stamp - tcp_rsk(req)->snt_synack);
        newtp->total_retrans = req->retrans;

        /* IPv4 fields are unused on a pure v6 socket; poison them. */
        newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
        newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
        /* Copy over the MD5 key from the original socket */
        if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
                /* We're using one, so create a matching key
                 * on the newsk structure. If we fail to get
                 * memory, then we end up not copying the key
                 * across. Shucks.
                 */
                tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr,
                               AF_INET6, key->key, key->keylen,
                               sk_gfp_atomic(sk, GFP_ATOMIC));
        }
#endif

        if (__inet_inherit_port(sk, newsk) < 0) {
                sock_put(newsk);
                goto out;
        }
        __inet6_hash(newsk, NULL);

        return newsk;

out_overflow:
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
        dst_release(dst);
out:
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
        return NULL;
}
1387
/* Validate or prepare the checksum of an incoming TCP/IPv6 segment.
 * Returns 0 when the packet may proceed (verified now or deferred to
 * copy-and-checksum), non-zero folded csum on verification failure.
 */
static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
        if (skb->ip_summed == CHECKSUM_COMPLETE) {
                /* Hardware computed a full checksum: fold in the IPv6
                 * pseudo-header; a zero result proves the packet good.
                 */
                if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
                                  &ipv6_hdr(skb)->daddr, skb->csum)) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        return 0;
                }
        }

        /* Store the pseudo-header checksum so later code can complete
         * the verification (e.g. during copy to user space).
         */
        skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
                                              &ipv6_hdr(skb)->saddr,
                                              &ipv6_hdr(skb)->daddr, 0));

        if (skb->len <= 76) {
                /* Short packets: verifying now is cheaper than deferring. */
                return __skb_checksum_complete(skb);
        }
        return 0;
}
1407
1408 /* The socket must have it's spinlock held when we get
1409  * here.
1410  *
1411  * We have a potential double-lock case here, so even when
1412  * doing backlog processing we use the BH locking scheme.
1413  * This is because we cannot sleep with the original spinlock
1414  * held.
1415  */
1416 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1417 {
1418         struct ipv6_pinfo *np = inet6_sk(sk);
1419         struct tcp_sock *tp;
1420         struct sk_buff *opt_skb = NULL;
1421
1422         /* Imagine: socket is IPv6. IPv4 packet arrives,
1423            goes to IPv4 receive handler and backlogged.
1424            From backlog it always goes here. Kerboom...
1425            Fortunately, tcp_rcv_established and rcv_established
1426            handle them correctly, but it is not case with
1427            tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
1428          */
1429
1430         if (skb->protocol == htons(ETH_P_IP))
1431                 return tcp_v4_do_rcv(sk, skb);
1432
1433 #ifdef CONFIG_TCP_MD5SIG
1434         if (tcp_v6_inbound_md5_hash (sk, skb))
1435                 goto discard;
1436 #endif
1437
1438         if (sk_filter(sk, skb))
1439                 goto discard;
1440
1441         /*
1442          *      socket locking is here for SMP purposes as backlog rcv
1443          *      is currently called with bh processing disabled.
1444          */
1445
1446         /* Do Stevens' IPV6_PKTOPTIONS.
1447
1448            Yes, guys, it is the only place in our code, where we
1449            may make it not affecting IPv4.
1450            The rest of code is protocol independent,
1451            and I do not like idea to uglify IPv4.
1452
1453            Actually, all the idea behind IPV6_PKTOPTIONS
1454            looks not very well thought. For now we latch
1455            options, received in the last packet, enqueued
1456            by tcp. Feel free to propose better solution.
1457                                                --ANK (980728)
1458          */
1459         if (np->rxopt.all)
1460                 opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));
1461
1462         if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1463                 struct dst_entry *dst = sk->sk_rx_dst;
1464
1465                 sock_rps_save_rxhash(sk, skb);
1466                 if (dst) {
1467                         if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1468                             dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1469                                 dst_release(dst);
1470                                 sk->sk_rx_dst = NULL;
1471                         }
1472                 }
1473
1474                 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
1475                         goto reset;
1476                 if (opt_skb)
1477                         goto ipv6_pktoptions;
1478                 return 0;
1479         }
1480
1481         if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1482                 goto csum_err;
1483
1484         if (sk->sk_state == TCP_LISTEN) {
1485                 struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1486                 if (!nsk)
1487                         goto discard;
1488
1489                 /*
1490                  * Queue it on the new socket if the new socket is active,
1491                  * otherwise we just shortcircuit this and continue with
1492                  * the new socket..
1493                  */
1494                 if(nsk != sk) {
1495                         sock_rps_save_rxhash(nsk, skb);
1496                         if (tcp_child_process(sk, nsk, skb))
1497                                 goto reset;
1498                         if (opt_skb)
1499                                 __kfree_skb(opt_skb);
1500                         return 0;
1501                 }
1502         } else
1503                 sock_rps_save_rxhash(sk, skb);
1504
1505         if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1506                 goto reset;
1507         if (opt_skb)
1508                 goto ipv6_pktoptions;
1509         return 0;
1510
1511 reset:
1512         tcp_v6_send_reset(sk, skb);
1513 discard:
1514         if (opt_skb)
1515                 __kfree_skb(opt_skb);
1516         kfree_skb(skb);
1517         return 0;
1518 csum_err:
1519         TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1520         goto discard;
1521
1522
1523 ipv6_pktoptions:
1524         /* Do you ask, what is it?
1525
1526            1. skb was enqueued by tcp.
1527            2. skb is added to tail of read queue, rather than out of order.
1528            3. socket is not in passive state.
1529            4. Finally, it really contains options, which user wants to receive.
1530          */
1531         tp = tcp_sk(sk);
1532         if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1533             !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1534                 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1535                         np->mcast_oif = inet6_iif(opt_skb);
1536                 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1537                         np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1538                 if (np->rxopt.bits.rxtclass)
1539                         np->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
1540                 if (ipv6_opt_accepted(sk, opt_skb)) {
1541                         skb_set_owner_r(opt_skb, sk);
1542                         opt_skb = xchg(&np->pktoptions, opt_skb);
1543                 } else {
1544                         __kfree_skb(opt_skb);
1545                         opt_skb = xchg(&np->pktoptions, NULL);
1546                 }
1547         }
1548
1549         kfree_skb(opt_skb);
1550         return 0;
1551 }
1552
/* Protocol entry point for incoming TCP/IPv6 packets: sanity-check the
 * header, verify the checksum, fill TCP_SKB_CB, look up the owning
 * socket and hand the segment over (directly, via prequeue, or via the
 * socket backlog when the socket is owned by a user context).  Also
 * handles the no-socket (RST) and TIME-WAIT cases.
 */
static int tcp_v6_rcv(struct sk_buff *skb)
{
        const struct tcphdr *th;
        const struct ipv6hdr *hdr;
        struct sock *sk;
        int ret;
        struct net *net = dev_net(skb->dev);

        if (skb->pkt_type != PACKET_HOST)
                goto discard_it;

        /*
         *      Count it even if it's bad.
         */
        TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

        if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
                goto discard_it;

        th = tcp_hdr(skb);

        /* Data offset smaller than the minimum header is malformed. */
        if (th->doff < sizeof(struct tcphdr)/4)
                goto bad_packet;
        if (!pskb_may_pull(skb, th->doff*4))
                goto discard_it;

        if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
                goto bad_packet;

        /* Re-read headers: pskb_may_pull() may have moved skb data. */
        th = tcp_hdr(skb);
        hdr = ipv6_hdr(skb);
        TCP_SKB_CB(skb)->seq = ntohl(th->seq);
        TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
                                    skb->len - th->doff*4);
        TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
        TCP_SKB_CB(skb)->when = 0;
        TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
        TCP_SKB_CB(skb)->sacked = 0;

        sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
        if (!sk)
                goto no_tcp_socket;

process:
        if (sk->sk_state == TCP_TIME_WAIT)
                goto do_time_wait;

        /* Generalized TTL security mechanism (IP_MINTTL analogue). */
        if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
                NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
                goto discard_and_relse;
        }

        if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_and_relse;

        if (sk_filter(sk, skb))
                goto discard_and_relse;

        skb->dev = NULL;

        bh_lock_sock_nested(sk);
        ret = 0;
        if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
                struct tcp_sock *tp = tcp_sk(sk);
                if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
                        tp->ucopy.dma_chan = net_dma_find_channel();
                if (tp->ucopy.dma_chan)
                        ret = tcp_v6_do_rcv(sk, skb);
                else
#endif
                {
                        if (!tcp_prequeue(sk, skb))
                                ret = tcp_v6_do_rcv(sk, skb);
                }
        } else if (unlikely(sk_add_backlog(sk, skb,
                                           sk->sk_rcvbuf + sk->sk_sndbuf))) {
                /* Backlog limit exceeded: drop rather than queue. */
                bh_unlock_sock(sk);
                NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
                goto discard_and_relse;
        }
        bh_unlock_sock(sk);

        sock_put(sk);
        return ret ? -1 : 0;

no_tcp_socket:
        if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
                goto discard_it;

        if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
                TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
        } else {
                /* Valid segment but no socket: answer with a RST. */
                tcp_v6_send_reset(NULL, skb);
        }

discard_it:

        /*
         *      Discard frame
         */

        kfree_skb(skb);
        return 0;

discard_and_relse:
        sock_put(sk);
        goto discard_it;

do_time_wait:
        if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
                inet_twsk_put(inet_twsk(sk));
                goto discard_it;
        }

        if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
                TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
                inet_twsk_put(inet_twsk(sk));
                goto discard_it;
        }

        switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
        case TCP_TW_SYN:
        {
                /* A new SYN may legitimately reuse a TIME-WAIT pair if a
                 * listener exists: kill the tw sock and restart lookup.
                 */
                struct sock *sk2;

                sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
                                            &ipv6_hdr(skb)->daddr,
                                            ntohs(th->dest), inet6_iif(skb));
                if (sk2 != NULL) {
                        struct inet_timewait_sock *tw = inet_twsk(sk);
                        inet_twsk_deschedule(tw, &tcp_death_row);
                        inet_twsk_put(tw);
                        sk = sk2;
                        goto process;
                }
                /* Fall through to ACK */
        }
        case TCP_TW_ACK:
                tcp_v6_timewait_ack(sk, skb);
                break;
        case TCP_TW_RST:
                goto no_tcp_socket;
        case TCP_TW_SUCCESS:;
        }
        goto discard_it;
}
1701
1702 static void tcp_v6_early_demux(struct sk_buff *skb)
1703 {
1704         const struct ipv6hdr *hdr;
1705         const struct tcphdr *th;
1706         struct sock *sk;
1707
1708         if (skb->pkt_type != PACKET_HOST)
1709                 return;
1710
1711         if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1712                 return;
1713
1714         hdr = ipv6_hdr(skb);
1715         th = tcp_hdr(skb);
1716
1717         if (th->doff < sizeof(struct tcphdr) / 4)
1718                 return;
1719
1720         sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1721                                         &hdr->saddr, th->source,
1722                                         &hdr->daddr, ntohs(th->dest),
1723                                         inet6_iif(skb));
1724         if (sk) {
1725                 skb->sk = sk;
1726                 skb->destructor = sock_edemux;
1727                 if (sk->sk_state != TCP_TIME_WAIT) {
1728                         struct dst_entry *dst = sk->sk_rx_dst;
1729                         struct inet_sock *icsk = inet_sk(sk);
1730                         if (dst)
1731                                 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1732                         if (dst &&
1733                             icsk->rx_dst_ifindex == skb->skb_iif)
1734                                 skb_dst_set_noref(skb, dst);
1735                 }
1736         }
1737 }
1738
/* TIME_WAIT minisock glue: object size and twsk helpers for IPv6 TCP. */
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};
1744
/*
 * Address-family specific connection-socket operations used by native
 * IPv6 TCP sockets (installed in tcp_v6_init_sock()).
 */
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header    = inet6_sk_rebuild_header,
	.sk_rx_dst_set     = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock     = tcp_v6_syn_recv_sock,
	.net_header_len    = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
1764
#ifdef CONFIG_TCP_MD5SIG
/* MD5 signature helpers for native IPv6 TCP sockets. */
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
1772
1773 /*
1774  *      TCP over IPv4 via INET6 API
1775  */
1776
/*
 * AF-specific ops for v4-mapped IPv6 sockets: transmit-side helpers
 * come from the IPv4 stack (ip_queue_xmit, tcp_v4_send_check), while
 * socket options and sockaddr handling stay IPv6.
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header    = inet_sk_rebuild_header,
	.sk_rx_dst_set     = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock     = tcp_v6_syn_recv_sock,
	.net_header_len    = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
1795
#ifdef CONFIG_TCP_MD5SIG
/*
 * MD5 signature helpers for v4-mapped sockets: hashing uses the IPv4
 * pseudo-header routines, key parsing stays with the IPv6 variant.
 */
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
1803
1804 /* NOTE: A lot of things set to zero explicitly by call to
1805  *       sk_alloc() so need not be done here.
1806  */
1807 static int tcp_v6_init_sock(struct sock *sk)
1808 {
1809         struct inet_connection_sock *icsk = inet_csk(sk);
1810
1811         tcp_init_sock(sk);
1812
1813         icsk->icsk_af_ops = &ipv6_specific;
1814
1815 #ifdef CONFIG_TCP_MD5SIG
1816         tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1817 #endif
1818
1819         return 0;
1820 }
1821
/*
 * Socket destroy callback: run the common (IPv4-shared) TCP teardown
 * first, then release the IPv6-specific socket state.
 */
static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
1827
1828 #ifdef CONFIG_PROC_FS
1829 /* Proc filesystem TCPv6 sock list dumping. */
/*
 * Emit one SYN_RECV open request as a /proc/net/tcp6 line.
 * The column layout must stay in sync with the header printed by
 * tcp6_seq_show().
 */
static void get_openreq6(struct seq_file *seq,
			 const struct sock *sk, struct request_sock *req, int i, int uid)
{
	/* Remaining lifetime of the request in jiffies, clamped at 0. */
	int ttd = req->expires - jiffies;
	const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_rsk(req)->loc_port),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0,0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   uid,
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
1860
/*
 * Emit one full TCP socket (listening or established) as a
 * /proc/net/tcp6 line.
 */
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct ipv6_pinfo *np = inet6_sk(sp);

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	/*
	 * Timer code reported in the "tr" column:
	 *   1 = retransmit timer, 4 = zero-window probe timer,
	 *   2 = sk_timer, 0 = no timer pending.
	 */
	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;	/* prints a zero delta below */
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una,
		   /* rx_queue: accept backlog for listeners, unread bytes otherwise */
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
		   );
}
1916
/*
 * Emit one TIME_WAIT minisocket as a /proc/net/tcp6 line; most columns
 * are fixed placeholders since a twsk carries no queue/timer detail.
 */
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	/* Remaining TIME_WAIT lifetime in jiffies, clamped at 0. */
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest = &tw6->tw_v6_daddr;
	src  = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
1945
1946 static int tcp6_seq_show(struct seq_file *seq, void *v)
1947 {
1948         struct tcp_iter_state *st;
1949
1950         if (v == SEQ_START_TOKEN) {
1951                 seq_puts(seq,
1952                          "  sl  "
1953                          "local_address                         "
1954                          "remote_address                        "
1955                          "st tx_queue rx_queue tr tm->when retrnsmt"
1956                          "   uid  timeout inode\n");
1957                 goto out;
1958         }
1959         st = seq->private;
1960
1961         switch (st->state) {
1962         case TCP_SEQ_STATE_LISTENING:
1963         case TCP_SEQ_STATE_ESTABLISHED:
1964                 get_tcp6_sock(seq, v, st->num);
1965                 break;
1966         case TCP_SEQ_STATE_OPENREQ:
1967                 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
1968                 break;
1969         case TCP_SEQ_STATE_TIME_WAIT:
1970                 get_timewait6_sock(seq, v, st->num);
1971                 break;
1972         }
1973 out:
1974         return 0;
1975 }
1976
/* File operations for /proc/net/tcp6; tcp_seq_open wires up the iterator. */
static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};
1984
/* AF_INET6 descriptor for the shared TCP proc machinery. */
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};
1993
/* Register /proc/net/tcp6 for this network namespace. */
int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}
1998
/* Remove /proc/net/tcp6 for this network namespace. */
void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
2003 #endif
2004
/*
 * Transport protocol operations for IPv6 TCP sockets; hooked into the
 * socket layer via tcpv6_protosw below. Most callbacks are the shared
 * TCP implementations, with the v6-specific init/destroy/backlog hooks.
 */
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.mtu_reduced		= tcp_v6_mtu_reduced,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};
2049
/*
 * IPv6 next-header handler for IPPROTO_TCP (receive, errors, GSO/GRO);
 * registered in tcpv6_init().
 */
static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.gso_send_check	=	tcp_v6_gso_send_check,
	.gso_segment	=	tcp_tso_segment,
	.gro_receive	=	tcp6_gro_receive,
	.gro_complete	=	tcp6_gro_complete,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
2060
/* Socket-switch entry binding SOCK_STREAM/IPPROTO_TCP to tcpv6_prot. */
static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.no_check	=	0,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
2070
/* Per-namespace init: create the kernel control socket in net->ipv6.tcp_sk. */
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}
2076
/* Per-namespace exit: destroy the control socket made in tcpv6_net_init(). */
static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}
2081
/* Batched exit: purge AF_INET6 TIME_WAIT sockets for the dying namespaces. */
static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}
2086
/* Per-network-namespace lifecycle hooks for TCPv6. */
static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
2092
2093 int __init tcpv6_init(void)
2094 {
2095         int ret;
2096
2097         ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2098         if (ret)
2099                 goto out;
2100
2101         /* register inet6 protocol */
2102         ret = inet6_register_protosw(&tcpv6_protosw);
2103         if (ret)
2104                 goto out_tcpv6_protocol;
2105
2106         ret = register_pernet_subsys(&tcpv6_net_ops);
2107         if (ret)
2108                 goto out_tcpv6_protosw;
2109 out:
2110         return ret;
2111
2112 out_tcpv6_protocol:
2113         inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2114 out_tcpv6_protosw:
2115         inet6_unregister_protosw(&tcpv6_protosw);
2116         goto out;
2117 }
2118
/* Module exit: tear down in exact reverse order of tcpv6_init(). */
void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}