ip6_gre: fix flowi6_proto value in xmit path
net/ipv6/ip6_output.c
/*
 *      IPv6 output functions
 *      Linux INET6 implementation
 *
 *      Authors:
 *      Pedro Roque             <roque@di.fc.ul.pt>
 *
 *      Based on linux/net/ipv4/ip_output.c
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *      Changes:
 *      A.N.Kuznetsov   :       arithmetic in fragmentation.
 *                              extension headers are implemented.
 *                              route changes now work.
 *                              ip6_forward does not confuse sniffers.
 *                              etc.
 *
 *      H. von Brand    :       Added missing #include <linux/string.h>
 *      Imran Patel     :       frag id should be in NBO
 *      Kazunori MIYAZAWA @USAGI
 *                      :       add ip6_append_data and related functions
 *                              for datagram xmit
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>
#include <linux/mroute6.h>

int __ip6_local_out(struct sk_buff *skb)
{
        int len;

        len = skb->len - sizeof(struct ipv6hdr);
        if (len > IPV6_MAXPLEN)
                len = 0;
        ipv6_hdr(skb)->payload_len = htons(len);

        return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
                       skb_dst(skb)->dev, dst_output);
}

int ip6_local_out(struct sk_buff *skb)
{
        int err;

        err = __ip6_local_out(skb);
        if (likely(err == 1))
                err = dst_output(skb);

        return err;
}
EXPORT_SYMBOL_GPL(ip6_local_out);
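
/*
 * Note: when the payload would exceed IPV6_MAXPLEN (65535 bytes),
 * payload_len is set to 0, the value a jumbogram (RFC 2675) carries in
 * the fixed header.  nf_hook() returns 1 when no LOCAL_OUT hook stole
 * or dropped the skb, which is why ip6_local_out() only then passes the
 * packet on to dst_output().
 */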

static int ip6_finish_output2(struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        struct net_device *dev = dst->dev;
        struct neighbour *neigh;
        struct in6_addr *nexthop;
        int ret;

        skb->protocol = htons(ETH_P_IPV6);
        skb->dev = dev;

        if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
                struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

                if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) &&
                    ((mroute6_socket(dev_net(dev), skb) &&
                     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
                     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
                                         &ipv6_hdr(skb)->saddr))) {
                        struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

                        /* Do not check for IFF_ALLMULTI; multicast routing
                         * is not supported in any case.
                         */
                        if (newskb)
                                NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
                                        newskb, NULL, newskb->dev,
                                        dev_loopback_xmit);

                        if (ipv6_hdr(skb)->hop_limit == 0) {
                                IP6_INC_STATS(dev_net(dev), idev,
                                              IPSTATS_MIB_OUTDISCARDS);
                                kfree_skb(skb);
                                return 0;
                        }
                }

                IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST,
                                skb->len);

                if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
                    IPV6_ADDR_SCOPE_NODELOCAL &&
                    !(dev->flags & IFF_LOOPBACK)) {
                        kfree_skb(skb);
                        return 0;
                }
        }

        rcu_read_lock_bh();
        nexthop = rt6_nexthop((struct rt6_info *)dst);
        neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
        if (unlikely(!neigh))
                neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
        if (!IS_ERR(neigh)) {
                ret = dst_neigh_output(dst, neigh, skb);
                rcu_read_unlock_bh();
                return ret;
        }
        rcu_read_unlock_bh();

        IP6_INC_STATS(dev_net(dst->dev),
                      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
        kfree_skb(skb);
        return -EINVAL;
}

static int ip6_finish_output(struct sk_buff *skb)
{
        if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
            dst_allfrag(skb_dst(skb)) ||
            (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
                return ip6_fragment(skb, ip6_finish_output2);
        else
                return ip6_finish_output2(skb);
}
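
/*
 * Note: fragmentation on output is triggered in three cases: the packet
 * is larger than the path MTU and is not a GSO packet (GSO packets are
 * segmented later in the stack), the route demands a fragment header on
 * every packet (dst_allfrag(), set after a reported path MTU below
 * IPV6_MIN_MTU), or netfilter defragmentation recorded a frag_max_size
 * that the packet now exceeds.
 */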

int ip6_output(struct sk_buff *skb)
{
        struct net_device *dev = skb_dst(skb)->dev;
        struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
        if (unlikely(idev->cnf.disable_ipv6)) {
                IP6_INC_STATS(dev_net(dev), idev,
                              IPSTATS_MIB_OUTDISCARDS);
                kfree_skb(skb);
                return 0;
        }

        return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, dev,
                            ip6_finish_output,
                            !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
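
/*
 * Note: NF_HOOK_COND only traverses the POST_ROUTING hook when its
 * condition is true; a packet flagged IP6SKB_REROUTED (set when the
 * packet was rerouted, e.g. on the xfrm path) skips the hook here and
 * goes straight to ip6_finish_output().
 */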

/*
 *      xmit an sk_buff (used by TCP, SCTP and DCCP)
 */

int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
             struct ipv6_txoptions *opt, int tclass)
{
        struct net *net = sock_net(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct in6_addr *first_hop = &fl6->daddr;
        struct dst_entry *dst = skb_dst(skb);
        struct ipv6hdr *hdr;
        u8  proto = fl6->flowi6_proto;
        int seg_len = skb->len;
        int hlimit = -1;
        u32 mtu;

        if (opt) {
                unsigned int head_room;

                /* First: exthdrs may take lots of space (~8K for now);
                 * MAX_HEADER is not enough.
                 */
                head_room = opt->opt_nflen + opt->opt_flen;
                seg_len += head_room;
                head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

                if (skb_headroom(skb) < head_room) {
                        struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
                        if (skb2 == NULL) {
                                IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                                              IPSTATS_MIB_OUTDISCARDS);
                                kfree_skb(skb);
                                return -ENOBUFS;
                        }
                        consume_skb(skb);
                        skb = skb2;
                        skb_set_owner_w(skb, sk);
                }
                if (opt->opt_flen)
                        ipv6_push_frag_opts(skb, opt, &proto);
                if (opt->opt_nflen)
                        ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
        }

        skb_push(skb, sizeof(struct ipv6hdr));
        skb_reset_network_header(skb);
        hdr = ipv6_hdr(skb);

        /*
         *      Fill in the IPv6 header
         */
        if (np)
                hlimit = np->hop_limit;
        if (hlimit < 0)
                hlimit = ip6_dst_hoplimit(dst);

        ip6_flow_hdr(hdr, tclass, fl6->flowlabel);

        hdr->payload_len = htons(seg_len);
        hdr->nexthdr = proto;
        hdr->hop_limit = hlimit;

        hdr->saddr = fl6->saddr;
        hdr->daddr = *first_hop;

        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;

        mtu = dst_mtu(dst);
        if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) {
                IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
                              IPSTATS_MIB_OUT, skb->len);
                return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
                               dst->dev, dst_output);
        }

        skb->dev = dst->dev;
        ipv6_local_error(sk, EMSGSIZE, fl6, mtu);
        IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
        kfree_skb(skb);
        return -EMSGSIZE;
}

EXPORT_SYMBOL(ip6_xmit);
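
/*
 * Headroom arithmetic, worked through for illustration: with 16 bytes
 * of fragmentable options (opt_flen), no non-fragmentable options, and
 * an Ethernet device, head_room starts at 16, seg_len grows by 16, and
 * the reallocation threshold becomes 16 + 40 (fixed IPv6 header) +
 * LL_RESERVED_SPACE(dev) bytes.  The options are pushed first, then the
 * 40-byte fixed header, so by the time hdr->nexthdr is written, proto
 * holds the protocol number of the first pushed extension header.
 */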

static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
        struct ip6_ra_chain *ra;
        struct sock *last = NULL;

        read_lock(&ip6_ra_lock);
        for (ra = ip6_ra_chain; ra; ra = ra->next) {
                struct sock *sk = ra->sk;
                if (sk && ra->sel == sel &&
                    (!sk->sk_bound_dev_if ||
                     sk->sk_bound_dev_if == skb->dev->ifindex)) {
                        if (last) {
                                struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
                                if (skb2)
                                        rawv6_rcv(last, skb2);
                        }
                        last = sk;
                }
        }

        if (last) {
                rawv6_rcv(last, skb);
                read_unlock(&ip6_ra_lock);
                return 1;
        }
        read_unlock(&ip6_ra_lock);
        return 0;
}
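
/*
 * Note: the loop uses the usual "clone for all but the last listener"
 * pattern: each time another matching router-alert socket is found, the
 * previously remembered one receives a clone, and the final socket
 * consumes the original skb.  A return of 1 tells ip6_forward() the
 * packet was delivered and must not also be forwarded.
 */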

static int ip6_forward_proxy_check(struct sk_buff *skb)
{
        struct ipv6hdr *hdr = ipv6_hdr(skb);
        u8 nexthdr = hdr->nexthdr;
        __be16 frag_off;
        int offset;

        if (ipv6_ext_hdr(nexthdr)) {
                offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
                if (offset < 0)
                        return 0;
        } else
                offset = sizeof(struct ipv6hdr);

        if (nexthdr == IPPROTO_ICMPV6) {
                struct icmp6hdr *icmp6;

                if (!pskb_may_pull(skb, (skb_network_header(skb) +
                                         offset + 1 - skb->data)))
                        return 0;

                icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);

                switch (icmp6->icmp6_type) {
                case NDISC_ROUTER_SOLICITATION:
                case NDISC_ROUTER_ADVERTISEMENT:
                case NDISC_NEIGHBOUR_SOLICITATION:
                case NDISC_NEIGHBOUR_ADVERTISEMENT:
                case NDISC_REDIRECT:
                        /* For reaction involving unicast neighbor discovery
                         * message destined to the proxied address, pass it to
                         * input function.
                         */
                        return 1;
                default:
                        break;
                }
        }

        /*
         * The proxying router can't forward traffic sent to a link-local
         * address, so signal the sender and discard the packet. This
         * behavior is clarified by the MIPv6 specification.
         */
        if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
                dst_link_failure(skb);
                return -1;
        }

        return 0;
}
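
/*
 * Return values, as the caller in ip6_forward() interprets them:
 *  1  - the packet is a neighbour-discovery message for a proxied
 *       address and should be handed to ip6_input() instead,
 *  0  - forward normally,
 * -1  - link-local destination behind an NDP proxy: report link
 *       failure and drop.
 */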

static inline int ip6_forward_finish(struct sk_buff *skb)
{
        return dst_output(skb);
}

static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
{
        if (skb->len <= mtu)
                return false;

        /* ipv6 conntrack defrag sets max_frag_size + local_df */
        if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
                return true;

        if (skb->local_df)
                return false;

        if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
                return false;

        return true;
}
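
/*
 * Note: the checks are ordered so that a reassembled conntrack packet
 * is judged by the largest original fragment (frag_max_size) before
 * local_df is consulted, and a GSO packet is allowed through as long as
 * each resulting segment will fit the MTU, even though skb->len itself
 * is larger.
 */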

int ip6_forward(struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        struct ipv6hdr *hdr = ipv6_hdr(skb);
        struct inet6_skb_parm *opt = IP6CB(skb);
        struct net *net = dev_net(dst->dev);
        u32 mtu;

        if (net->ipv6.devconf_all->forwarding == 0)
                goto error;

        if (skb_warn_if_lro(skb))
                goto drop;

        if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
                IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
                goto drop;
        }

        if (skb->pkt_type != PACKET_HOST)
                goto drop;

        skb_forward_csum(skb);

        /*
         *      We DO NOT make any processing on
         *      RA packets, pushing them to user level AS IS
         *      without any WARRANTY that application will be able
         *      to interpret them. The reason is that we
         *      cannot make anything clever here.
         *
         *      We are not end-node, so that if packet contains
         *      AH/ESP, we cannot make anything.
         *      Defragmentation also would be mistake, RA packets
         *      cannot be fragmented, because there is no warranty
         *      that different fragments will go along one path. --ANK
         */
        if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
                if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
                        return 0;
        }

        /*
         *      check and decrement ttl
         */
        if (hdr->hop_limit <= 1) {
                /* Force OUTPUT device used as source address */
                skb->dev = dst->dev;
                icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
                IP6_INC_STATS_BH(net,
                                 ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);

                kfree_skb(skb);
                return -ETIMEDOUT;
        }

        /* XXX: idev->cnf.proxy_ndp? */
        if (net->ipv6.devconf_all->proxy_ndp &&
            pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
                int proxied = ip6_forward_proxy_check(skb);
                if (proxied > 0)
                        return ip6_input(skb);
                else if (proxied < 0) {
                        IP6_INC_STATS(net, ip6_dst_idev(dst),
                                      IPSTATS_MIB_INDISCARDS);
                        goto drop;
                }
        }

        if (!xfrm6_route_forward(skb)) {
                IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
                goto drop;
        }
        dst = skb_dst(skb);

        /* IPv6 specs say nothing about it, but it is clear that we cannot
           send redirects to source routed frames.
           We don't send redirects to frames decapsulated from IPsec.
         */
        if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
                struct in6_addr *target = NULL;
                struct inet_peer *peer;
                struct rt6_info *rt;

                /*
                 *      incoming and outgoing devices are the same
                 *      send a redirect.
                 */

                rt = (struct rt6_info *) dst;
                if (rt->rt6i_flags & RTF_GATEWAY)
                        target = &rt->rt6i_gateway;
                else
                        target = &hdr->daddr;

                peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);

                /* Limit redirects both by destination (here)
                   and by source (inside ndisc_send_redirect)
                 */
                if (inet_peer_xrlim_allow(peer, 1*HZ))
                        ndisc_send_redirect(skb, target);
                if (peer)
                        inet_putpeer(peer);
        } else {
                int addrtype = ipv6_addr_type(&hdr->saddr);

                /* This check is security critical. */
                if (addrtype == IPV6_ADDR_ANY ||
                    addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
                        goto error;
                if (addrtype & IPV6_ADDR_LINKLOCAL) {
                        icmpv6_send(skb, ICMPV6_DEST_UNREACH,
                                    ICMPV6_NOT_NEIGHBOUR, 0);
                        goto error;
                }
        }

        mtu = dst_mtu(dst);
        if (mtu < IPV6_MIN_MTU)
                mtu = IPV6_MIN_MTU;

        if (ip6_pkt_too_big(skb, mtu)) {
                /* Again, force OUTPUT device used as source address */
                skb->dev = dst->dev;
                icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
                IP6_INC_STATS_BH(net,
                                 ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
                IP6_INC_STATS_BH(net,
                                 ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS);
                kfree_skb(skb);
                return -EMSGSIZE;
        }

        if (skb_cow(skb, dst->dev->hard_header_len)) {
                IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
                goto drop;
        }

        hdr = ipv6_hdr(skb);

        /* Mangling hops number delayed to point after skb COW */

        hdr->hop_limit--;

        IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
        IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
        return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
                       ip6_forward_finish);

error:
        IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
        kfree_skb(skb);
        return -EINVAL;
}
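
/*
 * Note: hop_limit is decremented only after skb_cow() has guaranteed a
 * private, writable copy of the header; mangling a shared header first
 * would corrupt clones still held elsewhere (e.g. by packet sockets).
 */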

static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
        to->pkt_type = from->pkt_type;
        to->priority = from->priority;
        to->protocol = from->protocol;
        skb_dst_drop(to);
        skb_dst_set(to, dst_clone(skb_dst(from)));
        to->dev = from->dev;
        to->mark = from->mark;

#ifdef CONFIG_NET_SCHED
        to->tc_index = from->tc_index;
#endif
        nf_copy(to, from);
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
        to->nf_trace = from->nf_trace;
#endif
        skb_copy_secmark(to, from);
}

static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
{
        static u32 ip6_idents_hashrnd __read_mostly;
        static bool hashrnd_initialized = false;
        u32 hash, id;

        if (unlikely(!hashrnd_initialized)) {
                hashrnd_initialized = true;
                get_random_bytes(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
        }
        hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd);
        hash = __ipv6_addr_jhash(&rt->rt6i_src.addr, hash);

        id = ip_idents_reserve(hash, 1);
        fhdr->identification = htonl(id);
}
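
/*
 * Note: fragment IDs are drawn from a counter bucket selected by
 * hashing the route's destination and source addresses with a secret
 * initialized on first use, so IDs are hard to predict across flows.
 * ip_idents_reserve() hands back the next ID for that bucket and
 * advances it by the requested amount (1 here).
 */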

int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
        struct sk_buff *frag;
        struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
        struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
        struct ipv6hdr *tmp_hdr;
        struct frag_hdr *fh;
        unsigned int mtu, hlen, left, len;
        int hroom, troom;
        __be32 frag_id = 0;
        int ptr, offset = 0, err = 0;
        u8 *prevhdr, nexthdr = 0;
        struct net *net = dev_net(skb_dst(skb)->dev);

        hlen = ip6_find_1stfragopt(skb, &prevhdr);
        nexthdr = *prevhdr;

        mtu = ip6_skb_dst_mtu(skb);

        /* We must not fragment if the socket is set to force MTU discovery
         * or if the skb is not generated by a local socket.
         */
        if (unlikely(!skb->local_df && skb->len > mtu) ||
                     (IP6CB(skb)->frag_max_size &&
                      IP6CB(skb)->frag_max_size > mtu)) {
                if (skb->sk && dst_allfrag(skb_dst(skb)))
                        sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);

                skb->dev = skb_dst(skb)->dev;
                icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
                IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                              IPSTATS_MIB_FRAGFAILS);
                kfree_skb(skb);
                return -EMSGSIZE;
        }

        if (np && np->frag_size < mtu) {
                if (np->frag_size)
                        mtu = np->frag_size;
        }
        mtu -= hlen + sizeof(struct frag_hdr);

        if (skb_has_frag_list(skb)) {
                int first_len = skb_pagelen(skb);
                struct sk_buff *frag2;

                if (first_len - hlen > mtu ||
                    ((first_len - hlen) & 7) ||
                    skb_cloned(skb))
                        goto slow_path;

                skb_walk_frags(skb, frag) {
                        /* Correct geometry. */
                        if (frag->len > mtu ||
                            ((frag->len & 7) && frag->next) ||
                            skb_headroom(frag) < hlen)
                                goto slow_path_clean;

                        /* Partially cloned skb? */
                        if (skb_shared(frag))
                                goto slow_path_clean;

                        BUG_ON(frag->sk);
                        if (skb->sk) {
                                frag->sk = skb->sk;
                                frag->destructor = sock_wfree;
                        }
                        skb->truesize -= frag->truesize;
                }

                err = 0;
                offset = 0;
                frag = skb_shinfo(skb)->frag_list;
                skb_frag_list_init(skb);
                /* BUILD HEADER */

                *prevhdr = NEXTHDR_FRAGMENT;
                tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
                if (!tmp_hdr) {
                        IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                                      IPSTATS_MIB_FRAGFAILS);
                        return -ENOMEM;
                }

                __skb_pull(skb, hlen);
                fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
                __skb_push(skb, hlen);
                skb_reset_network_header(skb);
                memcpy(skb_network_header(skb), tmp_hdr, hlen);

                ipv6_select_ident(fh, rt);
                fh->nexthdr = nexthdr;
                fh->reserved = 0;
                fh->frag_off = htons(IP6_MF);
                frag_id = fh->identification;

                first_len = skb_pagelen(skb);
                skb->data_len = first_len - skb_headlen(skb);
                skb->len = first_len;
                ipv6_hdr(skb)->payload_len = htons(first_len -
                                                   sizeof(struct ipv6hdr));

                dst_hold(&rt->dst);

                for (;;) {
                        /* Prepare header of the next frame,
                         * before previous one went down. */
                        if (frag) {
                                frag->ip_summed = CHECKSUM_NONE;
                                skb_reset_transport_header(frag);
                                fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
                                __skb_push(frag, hlen);
                                skb_reset_network_header(frag);
                                memcpy(skb_network_header(frag), tmp_hdr,
                                       hlen);
                                offset += skb->len - hlen - sizeof(struct frag_hdr);
                                fh->nexthdr = nexthdr;
                                fh->reserved = 0;
                                fh->frag_off = htons(offset);
                                if (frag->next != NULL)
                                        fh->frag_off |= htons(IP6_MF);
                                fh->identification = frag_id;
                                ipv6_hdr(frag)->payload_len =
                                                htons(frag->len -
                                                      sizeof(struct ipv6hdr));
                                ip6_copy_metadata(frag, skb);
                        }

                        err = output(skb);
                        if (!err)
                                IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
                                              IPSTATS_MIB_FRAGCREATES);

                        if (err || !frag)
                                break;

                        skb = frag;
                        frag = skb->next;
                        skb->next = NULL;
                }

                kfree(tmp_hdr);

                if (err == 0) {
                        IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
                                      IPSTATS_MIB_FRAGOKS);
                        ip6_rt_put(rt);
                        return 0;
                }

                while (frag) {
                        skb = frag->next;
                        kfree_skb(frag);
                        frag = skb;
                }

                IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
                              IPSTATS_MIB_FRAGFAILS);
                ip6_rt_put(rt);
                return err;

slow_path_clean:
                skb_walk_frags(skb, frag2) {
                        if (frag2 == frag)
                                break;
                        frag2->sk = NULL;
                        frag2->destructor = NULL;
                        skb->truesize += frag2->truesize;
                }
        }

slow_path:
        if ((skb->ip_summed == CHECKSUM_PARTIAL) &&
            skb_checksum_help(skb))
                goto fail;

        left = skb->len - hlen;         /* Space per frame */
        ptr = hlen;                     /* Where to start from */

        /*
         *      Fragment the datagram.
         */

        *prevhdr = NEXTHDR_FRAGMENT;
        hroom = LL_RESERVED_SPACE(rt->dst.dev);
        troom = rt->dst.dev->needed_tailroom;

        /*
         *      Keep copying data until we run out.
         */
        while (left > 0) {
                len = left;
                /* IF: it doesn't fit, use 'mtu' - the data space left */
                if (len > mtu)
                        len = mtu;
                /* IF: we are not sending up to and including the packet end
                   then align the next start on an eight byte boundary */
                if (len < left) {
                        len &= ~7;
                }
                /*
                 *      Allocate buffer.
                 */

                if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
                                      hroom + troom, GFP_ATOMIC)) == NULL) {
                        NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
                        IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                                      IPSTATS_MIB_FRAGFAILS);
                        err = -ENOMEM;
                        goto fail;
                }

                /*
                 *      Set up data on packet
                 */

                ip6_copy_metadata(frag, skb);
                skb_reserve(frag, hroom);
                skb_put(frag, len + hlen + sizeof(struct frag_hdr));
                skb_reset_network_header(frag);
                fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
                frag->transport_header = (frag->network_header + hlen +
                                          sizeof(struct frag_hdr));

                /*
                 *      Charge the memory for the fragment to any owner
                 *      it might possess
                 */
                if (skb->sk)
                        skb_set_owner_w(frag, skb->sk);

                /*
                 *      Copy the packet header into the new buffer.
                 */
                skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);

                /*
                 *      Build fragment header.
                 */
                fh->nexthdr = nexthdr;
                fh->reserved = 0;
                if (!frag_id) {
                        ipv6_select_ident(fh, rt);
                        frag_id = fh->identification;
                } else
                        fh->identification = frag_id;

                /*
                 *      Copy a block of the IP datagram.
                 */
                if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len))
                        BUG();
                left -= len;

                fh->frag_off = htons(offset);
                if (left > 0)
                        fh->frag_off |= htons(IP6_MF);
                ipv6_hdr(frag)->payload_len = htons(frag->len -
                                                    sizeof(struct ipv6hdr));

                ptr += len;
                offset += len;

                /*
                 *      Put this fragment into the sending queue.
                 */
                err = output(frag);
                if (err)
                        goto fail;

                IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                              IPSTATS_MIB_FRAGCREATES);
        }
        IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                      IPSTATS_MIB_FRAGOKS);
        consume_skb(skb);
        return err;

fail:
        IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                      IPSTATS_MIB_FRAGFAILS);
        kfree_skb(skb);
        return err;
}
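
/*
 * Worked example for the slow path: with a 1500-byte path MTU and
 * hlen == 40 (plain IPv6 header, no extension headers before the
 * fragmentable part), mtu becomes 1500 - 40 - 8 = 1452 bytes of payload
 * per fragment, rounded down to 1448 (a multiple of 8) for every
 * fragment except the last.  The fast path above avoids these copies
 * entirely by reusing an already well-shaped frag_list.
 */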

static inline int ip6_rt_check(const struct rt6key *rt_key,
                               const struct in6_addr *fl_addr,
                               const struct in6_addr *addr_cache)
{
        return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
                (addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache));
}

static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
                                          struct dst_entry *dst,
                                          const struct flowi6 *fl6)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct rt6_info *rt;

        if (!dst)
                goto out;

        if (dst->ops->family != AF_INET6) {
                dst_release(dst);
                return NULL;
        }

        rt = (struct rt6_info *)dst;
        /* Yes, checking route validity in the not-connected
         * case is not very simple. Take into account
         * that we do not support routing by source, TOS,
         * and MSG_DONTROUTE            --ANK (980726)
         *
         * 1. ip6_rt_check(): If route was host route,
         *    check that cached destination is current.
         *    If it is network route, we still may
         *    check its validity using saved pointer
         *    to the last used address: daddr_cache.
         *    We do not want to save whole address now,
         *    (because main consumer of this service
         *    is tcp, which does not have this problem),
         *    so that the last trick works only on connected
         *    sockets.
         * 2. oif also should be the same.
         */
        if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
            ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
#endif
            (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
                dst_release(dst);
                dst = NULL;
        }

out:
        return dst;
}
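
/*
 * In short, a cached dst is kept only when the flow's destination still
 * matches it: either the cached route is a /128 host route for exactly
 * fl6->daddr, or daddr_cache (the last address used on a connected
 * socket) matches, and the requested output interface, if any, agrees
 * with the cached device.
 */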

static int ip6_dst_lookup_tail(struct sock *sk,
                               struct dst_entry **dst, struct flowi6 *fl6)
{
        struct net *net = sock_net(sk);
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
        struct neighbour *n;
        struct rt6_info *rt;
#endif
        int err;

        if (*dst == NULL)
                *dst = ip6_route_output(net, sk, fl6);

        if ((err = (*dst)->error))
                goto out_err_release;

        if (ipv6_addr_any(&fl6->saddr)) {
                struct rt6_info *rt = (struct rt6_info *) *dst;
                err = ip6_route_get_saddr(net, rt, &fl6->daddr,
                                          sk ? inet6_sk(sk)->srcprefs : 0,
                                          &fl6->saddr);
                if (err)
                        goto out_err_release;
        }

#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
        /*
         * Here if the dst entry we've looked up
         * has a neighbour entry that is in the INCOMPLETE
         * state and the src address from the flow is
         * marked as OPTIMISTIC, we release the found
         * dst entry and replace it instead with the
         * dst entry of the nexthop router
         */
        rt = (struct rt6_info *) *dst;
        rcu_read_lock_bh();
        n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt));
        err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
        rcu_read_unlock_bh();

        if (err) {
                struct inet6_ifaddr *ifp;
                struct flowi6 fl_gw6;
                int redirect;

                ifp = ipv6_get_ifaddr(net, &fl6->saddr,
                                      (*dst)->dev, 1);

                redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
                if (ifp)
                        in6_ifa_put(ifp);

                if (redirect) {
                        /*
                         * We need to get the dst entry for the
                         * default router instead
                         */
                        dst_release(*dst);
                        memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
                        memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
                        *dst = ip6_route_output(net, sk, &fl_gw6);
                        if ((err = (*dst)->error))
                                goto out_err_release;
                }
        }
#endif

        return 0;

out_err_release:
        if (err == -ENETUNREACH)
                IP6_INC_STATS_BH(net, NULL, IPSTATS_MIB_OUTNOROUTES);
        dst_release(*dst);
        *dst = NULL;
        return err;
}

/**
 *      ip6_dst_lookup - perform route lookup on flow
 *      @sk: socket which provides route info
 *      @dst: pointer to dst_entry * for result
 *      @fl6: flow to lookup
 *
 *      This function performs a route lookup on the given flow.
 *
 *      It returns zero on success, or a standard errno code on error.
 */
int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6)
{
        *dst = NULL;
        return ip6_dst_lookup_tail(sk, dst, fl6);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);

/**
 *      ip6_dst_lookup_flow - perform route lookup on flow with ipsec
 *      @sk: socket which provides route info
 *      @fl6: flow to lookup
 *      @final_dst: final destination address for ipsec lookup
 *      @can_sleep: we are in a sleepable context
 *
 *      This function performs a route lookup on the given flow.
 *
 *      It returns a valid dst pointer on success, or a pointer encoded
 *      error code.
 */
struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
                                      const struct in6_addr *final_dst,
                                      bool can_sleep)
{
        struct dst_entry *dst = NULL;
        int err;

        err = ip6_dst_lookup_tail(sk, &dst, fl6);
        if (err)
                return ERR_PTR(err);
        if (final_dst)
                fl6->daddr = *final_dst;
        if (can_sleep)
                fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;

        return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
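
/*
 * Typical caller pattern (an illustrative sketch, not taken verbatim
 * from a specific caller): the error is encoded in the pointer, so the
 * result must be tested with IS_ERR() before use:
 *
 *      dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
 *      if (IS_ERR(dst))
 *              return PTR_ERR(dst);
 *      skb_dst_set(skb, dst);
 */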

/**
 *      ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
 *      @sk: socket which provides the dst cache and route info
 *      @fl6: flow to lookup
 *      @final_dst: final destination address for ipsec lookup
 *      @can_sleep: we are in a sleepable context
 *
 *      This function performs a route lookup on the given flow with the
 *      possibility of using the cached route in the socket if it is valid.
 *      It will take the socket dst lock when operating on the dst cache.
 *      As a result, this function can only be used in process context.
 *
 *      It returns a valid dst pointer on success, or a pointer encoded
 *      error code.
 */
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
                                         const struct in6_addr *final_dst,
                                         bool can_sleep)
{
        struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
        int err;

        dst = ip6_sk_dst_check(sk, dst, fl6);

        err = ip6_dst_lookup_tail(sk, &dst, fl6);
        if (err)
                return ERR_PTR(err);
        if (final_dst)
                fl6->daddr = *final_dst;
        if (can_sleep)
                fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;

        return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);

static inline int ip6_ufo_append_data(struct sock *sk,
                        int getfrag(void *from, char *to, int offset, int len,
                        int odd, struct sk_buff *skb),
                        void *from, int length, int hh_len, int fragheaderlen,
                        int transhdrlen, int mtu, unsigned int flags,
                        struct rt6_info *rt)
{
        struct sk_buff *skb;
        int err;

        /* There is support for UDP large send offload by network
         * device, so create one single skb packet containing complete
         * udp datagram
         */
        if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
                struct frag_hdr fhdr;

                skb = sock_alloc_send_skb(sk,
                        hh_len + fragheaderlen + transhdrlen + 20,
                        (flags & MSG_DONTWAIT), &err);
                if (skb == NULL)
                        return err;

                /* reserve space for Hardware header */
                skb_reserve(skb, hh_len);

                /* create space for UDP/IP header */
                skb_put(skb, fragheaderlen + transhdrlen);

                /* initialize network header pointer */
                skb_reset_network_header(skb);

                /* initialize protocol header pointer */
                skb->transport_header = skb->network_header + fragheaderlen;

                skb->ip_summed = CHECKSUM_PARTIAL;
                skb->csum = 0;

                /* Specify the length of each IPv6 datagram fragment.
                 * It has to be a multiple of 8.
                 */
                skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
                                             sizeof(struct frag_hdr)) & ~7;
                skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
                ipv6_select_ident(&fhdr, rt);
                skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
                __skb_queue_tail(&sk->sk_write_queue, skb);
        }

        return skb_append_datato_frags(sk, skb, getfrag, from,
                                       (length - transhdrlen));
}
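
/*
 * gso_size arithmetic, worked through: with mtu == 1500 and
 * fragheaderlen == 40, gso_size = (1500 - 40 - 8) & ~7 = 1448, so the
 * UFO device emits fragments whose data length is 1448 bytes, the
 * largest multiple of 8 that fits next to the IPv6 and fragment
 * headers.
 */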

static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
                                               gfp_t gfp)
{
        return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}

static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
                                                gfp_t gfp)
{
        return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}

static void ip6_append_data_mtu(unsigned int *mtu,
                                int *maxfraglen,
                                unsigned int fragheaderlen,
                                struct sk_buff *skb,
                                struct rt6_info *rt,
                                unsigned int orig_mtu)
{
        if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
                if (skb == NULL) {
                        /* first fragment, reserve header_len */
                        *mtu = orig_mtu - rt->dst.header_len;

                } else {
                        /*
                         * this fragment is not first, the headers
                         * space is regarded as data space.
                         */
                        *mtu = orig_mtu;
                }
                *maxfraglen = ((*mtu - fragheaderlen) & ~7)
                              + fragheaderlen - sizeof(struct frag_hdr);
        }
}
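
/*
 * maxfraglen derivation: take the MTU, subtract the per-fragment header
 * overhead, round the remaining data space down to a multiple of 8,
 * then add the header length back and leave room for the 8-byte
 * fragment header.  E.g. mtu == 1500, fragheaderlen == 40:
 * ((1500 - 40) & ~7) + 40 - 8 = 1456 + 40 - 8 = 1488 bytes of skb
 * length at which the next fragment must be started.
 */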
1150
1151 int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1152         int offset, int len, int odd, struct sk_buff *skb),
1153         void *from, int length, int transhdrlen,
1154         int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
1155         struct rt6_info *rt, unsigned int flags, int dontfrag)
1156 {
1157         struct inet_sock *inet = inet_sk(sk);
1158         struct ipv6_pinfo *np = inet6_sk(sk);
1159         struct inet_cork *cork;
1160         struct sk_buff *skb, *skb_prev = NULL;
1161         unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
1162         int exthdrlen;
1163         int dst_exthdrlen;
1164         int hh_len;
1165         int copy;
1166         int err;
1167         int offset = 0;
1168         __u8 tx_flags = 0;
1169
1170         if (flags&MSG_PROBE)
1171                 return 0;
1172         cork = &inet->cork.base;
1173         if (skb_queue_empty(&sk->sk_write_queue)) {
1174                 /*
1175                  * setup for corking
1176                  */
1177                 if (opt) {
1178                         if (WARN_ON(np->cork.opt))
1179                                 return -EINVAL;
1180
1181                         np->cork.opt = kzalloc(opt->tot_len, sk->sk_allocation);
1182                         if (unlikely(np->cork.opt == NULL))
1183                                 return -ENOBUFS;
1184
1185                         np->cork.opt->tot_len = opt->tot_len;
1186                         np->cork.opt->opt_flen = opt->opt_flen;
1187                         np->cork.opt->opt_nflen = opt->opt_nflen;
1188
1189                         np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt,
1190                                                             sk->sk_allocation);
1191                         if (opt->dst0opt && !np->cork.opt->dst0opt)
1192                                 return -ENOBUFS;
1193
1194                         np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt,
1195                                                             sk->sk_allocation);
1196                         if (opt->dst1opt && !np->cork.opt->dst1opt)
1197                                 return -ENOBUFS;
1198
1199                         np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt,
1200                                                            sk->sk_allocation);
1201                         if (opt->hopopt && !np->cork.opt->hopopt)
1202                                 return -ENOBUFS;
1203
1204                         np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt,
1205                                                             sk->sk_allocation);
1206                         if (opt->srcrt && !np->cork.opt->srcrt)
1207                                 return -ENOBUFS;
1208
1209                         /* need source address above miyazawa*/
1210                 }
1211                 dst_hold(&rt->dst);
1212                 cork->dst = &rt->dst;
1213                 inet->cork.fl.u.ip6 = *fl6;
1214                 np->cork.hop_limit = hlimit;
1215                 np->cork.tclass = tclass;
1216                 if (rt->dst.flags & DST_XFRM_TUNNEL)
1217                         mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
1218                               rt->dst.dev->mtu : dst_mtu(&rt->dst);
1219                 else
1220                         mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
1221                               rt->dst.dev->mtu : dst_mtu(rt->dst.path);
1222                 if (np->frag_size < mtu) {
1223                         if (np->frag_size)
1224                                 mtu = np->frag_size;
1225                 }
1226                 cork->fragsize = mtu;
1227                 if (dst_allfrag(rt->dst.path))
1228                         cork->flags |= IPCORK_ALLFRAG;
1229                 cork->length = 0;
1230                 exthdrlen = (opt ? opt->opt_flen : 0);
1231                 length += exthdrlen;
1232                 transhdrlen += exthdrlen;
1233                 dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
1234         } else {
1235                 rt = (struct rt6_info *)cork->dst;
1236                 fl6 = &inet->cork.fl.u.ip6;
1237                 opt = np->cork.opt;
1238                 transhdrlen = 0;
1239                 exthdrlen = 0;
1240                 dst_exthdrlen = 0;
1241                 mtu = cork->fragsize;
1242         }
1243         orig_mtu = mtu;
1244
1245         hh_len = LL_RESERVED_SPACE(rt->dst.dev);
1246
1247         fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
1248                         (opt ? opt->opt_nflen : 0);
1249         maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);
1250
1251         if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
1252                 if (cork->length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
1253                         ipv6_local_error(sk, EMSGSIZE, fl6, mtu-exthdrlen);
1254                         return -EMSGSIZE;
1255                 }
1256         }
1257
1258         /* For UDP, check if TX timestamp is enabled */
1259         if (sk->sk_type == SOCK_DGRAM)
1260                 sock_tx_timestamp(sk, &tx_flags);
1261
1262         /*
1263          * Let's try using as much space as possible.
1264          * Use MTU if total length of the message fits into the MTU.
1265          * Otherwise, we need to reserve fragment header and
1266          * fragment alignment (= 8-15 octects, in total).
1267          *
1268          * Note that we may need to "move" the data from the tail of
1269          * of the buffer to the new fragment when we split
1270          * the message.
1271          *
1272          * FIXME: It may be fragmented into multiple chunks
1273          *        at once if non-fragmentable extension headers
1274          *        are too large.
1275          * --yoshfuji
1276          */
1277
1278         if ((length > mtu) && dontfrag && (sk->sk_protocol == IPPROTO_UDP ||
1279                                            sk->sk_protocol == IPPROTO_RAW)) {
1280                 ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
1281                 return -EMSGSIZE;
1282         }
1283
1284         skb = skb_peek_tail(&sk->sk_write_queue);
1285         cork->length += length;
1286         if (((length > mtu) ||
1287              (skb && skb_has_frags(skb))) &&
1288             (sk->sk_protocol == IPPROTO_UDP) &&
1289             (rt->dst.dev->features & NETIF_F_UFO)) {
1290                 err = ip6_ufo_append_data(sk, getfrag, from, length,
1291                                           hh_len, fragheaderlen,
1292                                           transhdrlen, mtu, flags, rt);
1293                 if (err)
1294                         goto error;
1295                 return 0;
1296         }
1297
1298         if (!skb)
1299                 goto alloc_new_skb;
1300
1301         while (length > 0) {
1302                 /* Check if the remaining data fits into current packet. */
1303                 copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
1304                 if (copy < length)
1305                         copy = maxfraglen - skb->len;
1306
1307                 if (copy <= 0) {
1308                         char *data;
1309                         unsigned int datalen;
1310                         unsigned int fraglen;
1311                         unsigned int fraggap;
1312                         unsigned int alloclen;
1313 alloc_new_skb:
1314                         /* There's no room in the current skb */
1315                         if (skb)
1316                                 fraggap = skb->len - maxfraglen;
1317                         else
1318                                 fraggap = 0;
1319                         /* update mtu and maxfraglen if necessary */
1320                         if (skb == NULL || skb_prev == NULL)
1321                                 ip6_append_data_mtu(&mtu, &maxfraglen,
1322                                                     fragheaderlen, skb, rt,
1323                                                     orig_mtu);
1324
1325                         skb_prev = skb;
1326
1327                         /*
1328                          * If remaining data exceeds the mtu,
1329                          * we know we need more fragment(s).
1330                          */
1331                         datalen = length + fraggap;
1332
1333                         if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
1334                                 datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
1335                         if ((flags & MSG_MORE) &&
1336                             !(rt->dst.dev->features&NETIF_F_SG))
1337                                 alloclen = mtu;
1338                         else
1339                                 alloclen = datalen + fragheaderlen;
1340
1341                         alloclen += dst_exthdrlen;
1342
1343                         if (datalen != length + fraggap) {
1344                                 /*
1345                                  * this is not the last fragment, the trailer
1346                                  * space is regarded as data space.
1347                                  */
1348                                 datalen += rt->dst.trailer_len;
1349                         }
1350
1351                         alloclen += rt->dst.trailer_len;
1352                         fraglen = datalen + fragheaderlen;
1353
1354                         /*
1355                          * We just reserve space for fragment header.
1356                          * Note: this may be overallocation if the message
1357                          * (without MSG_MORE) fits into the MTU.
1358                          */
1359                         alloclen += sizeof(struct frag_hdr);
1360
1361                         if (transhdrlen) {
1362                                 skb = sock_alloc_send_skb(sk,
1363                                                 alloclen + hh_len,
1364                                                 (flags & MSG_DONTWAIT), &err);
1365                         } else {
1366                                 skb = NULL;
1367                                 if (atomic_read(&sk->sk_wmem_alloc) <=
1368                                     2 * sk->sk_sndbuf)
1369                                         skb = sock_wmalloc(sk,
1370                                                            alloclen + hh_len, 1,
1371                                                            sk->sk_allocation);
1372                                 if (unlikely(skb == NULL))
1373                                         err = -ENOBUFS;
1374                                 else {
1375                                         /* Only the initial fragment
1376                                          * is time stamped.
1377                                          */
1378                                         tx_flags = 0;
1379                                 }
1380                         }
1381                         if (skb == NULL)
1382                                 goto error;
1383                         /*
1384                          *      Fill in the control structures
1385                          */
1386                         skb->ip_summed = CHECKSUM_NONE;
1387                         skb->csum = 0;
1388                         /* reserve for fragmentation and ipsec header */
1389                         skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
1390                                     dst_exthdrlen);
1391
1392                         if (sk->sk_type == SOCK_DGRAM)
1393                                 skb_shinfo(skb)->tx_flags = tx_flags;
1394
1395                         /*
1396                          *      Find where to start putting bytes
1397                          */
1398                         data = skb_put(skb, fraglen);
1399                         skb_set_network_header(skb, exthdrlen);
1400                         data += fragheaderlen;
1401                         skb->transport_header = (skb->network_header +
1402                                                  fragheaderlen);
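                             /*
                              * fraggap bytes at the tail of the previous
                              * skb spill past maxfraglen: copy them (and
                              * their checksum) into this fragment,
                              * subtract them from the old skb's checksum
                              * and trim it back to maxfraglen.
                              */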
1403                         if (fraggap) {
1404                                 skb->csum = skb_copy_and_csum_bits(
1405                                         skb_prev, maxfraglen,
1406                                         data + transhdrlen, fraggap, 0);
1407                                 skb_prev->csum = csum_sub(skb_prev->csum,
1408                                                           skb->csum);
1409                                 data += fraggap;
1410                                 pskb_trim_unique(skb_prev, maxfraglen);
1411                         }
1412                         copy = datalen - transhdrlen - fraggap;
1413
1414                         if (copy < 0) {
1415                                 err = -EINVAL;
1416                                 kfree_skb(skb);
1417                                 goto error;
1418                         } else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
1419                                 err = -EFAULT;
1420                                 kfree_skb(skb);
1421                                 goto error;
1422                         }
1423
1424                         offset += copy;
1425                         length -= datalen - fraggap;
1426                         transhdrlen = 0;
1427                         exthdrlen = 0;
1428                         dst_exthdrlen = 0;
1429
1430                         /*
1431                          * Put the packet on the pending queue
1432                          */
1433                         __skb_queue_tail(&sk->sk_write_queue, skb);
1434                         continue;
1435                 }
1436
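                     /*
                      * No new fragment is needed: append up to 'copy'
                      * bytes to the current tail skb, either into its
                      * linear area or, with NETIF_F_SG, into per-socket
                      * page fragments coalesced where possible.
                      */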
1437                 if (copy > length)
1438                         copy = length;
1439
1440                 if (!(rt->dst.dev->features&NETIF_F_SG)) {
1441                         unsigned int off;
1442
1443                         off = skb->len;
1444                         if (getfrag(from, skb_put(skb, copy),
1445                                                 offset, copy, off, skb) < 0) {
1446                                 __skb_trim(skb, off);
1447                                 err = -EFAULT;
1448                                 goto error;
1449                         }
1450                 } else {
1451                         int i = skb_shinfo(skb)->nr_frags;
1452                         struct page_frag *pfrag = sk_page_frag(sk);
1453
1454                         err = -ENOMEM;
1455                         if (!sk_page_frag_refill(sk, pfrag))
1456                                 goto error;
1457
1458                         if (!skb_can_coalesce(skb, i, pfrag->page,
1459                                               pfrag->offset)) {
1460                                 err = -EMSGSIZE;
1461                                 if (i == MAX_SKB_FRAGS)
1462                                         goto error;
1463
1464                                 __skb_fill_page_desc(skb, i, pfrag->page,
1465                                                      pfrag->offset, 0);
1466                                 skb_shinfo(skb)->nr_frags = ++i;
1467                                 get_page(pfrag->page);
1468                         }
1469                         copy = min_t(int, copy, pfrag->size - pfrag->offset);
1470                         if (getfrag(from,
1471                                     page_address(pfrag->page) + pfrag->offset,
1472                                     offset, copy, skb->len, skb) < 0)
1473                                 goto error_efault;
1474
1475                         pfrag->offset += copy;
1476                         skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1477                         skb->len += copy;
1478                         skb->data_len += copy;
1479                         skb->truesize += copy;
1480                         atomic_add(copy, &sk->sk_wmem_alloc);
1481                 }
1482                 offset += copy;
1483                 length -= copy;
1484         }
1485
1486         return 0;
1487
1488 error_efault:
1489         err = -EFAULT;
1490 error:
1491         cork->length -= length;
1492         IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
1493         return err;
1494 }
1495 EXPORT_SYMBOL_GPL(ip6_append_data);
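
/*
 * Example (editor's sketch, not part of this file): the canonical caller
 * pattern, modelled on rawv6_sendmsg()/udpv6_sendmsg().  Flow and route
 * setup are elided; example_send() and its parameter list are
 * hypothetical, while ip_generic_getfrag() is the stock getfrag callback
 * (declared in <net/ip.h>) that copies payload from the caller's iovec.
 */
static int example_send(struct sock *sk, struct msghdr *msg, size_t len,
                        struct flowi6 *fl6, struct rt6_info *rt,
                        struct ipv6_txoptions *opt, int hlimit, int tclass)
{
        int err;

        lock_sock(sk);
        err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov, len,
                              0 /* transhdrlen */, hlimit, tclass, opt,
                              fl6, rt, msg->msg_flags, 0 /* dontfrag */);
        if (err)
                /* a partial message is left queued; throw it away */
                ip6_flush_pending_frames(sk);
        else if (!(msg->msg_flags & MSG_MORE))
                /* build the IPv6 header and hand the skb to the stack */
                err = ip6_push_pending_frames(sk);
        release_sock(sk);
        return err;
}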
1496
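/*
 * Drop the per-cork state: the private copy of the tx options taken
 * when the cork was set up, the held route and the cached flow.
 */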
1497 static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np)
1498 {
1499         if (np->cork.opt) {
1500                 kfree(np->cork.opt->dst0opt);
1501                 kfree(np->cork.opt->dst1opt);
1502                 kfree(np->cork.opt->hopopt);
1503                 kfree(np->cork.opt->srcrt);
1504                 kfree(np->cork.opt);
1505                 np->cork.opt = NULL;
1506         }
1507
1508         if (inet->cork.base.dst) {
1509                 dst_release(inet->cork.base.dst);
1510                 inet->cork.base.dst = NULL;
1511                 inet->cork.base.flags &= ~IPCORK_ALLFRAG;
1512         }
1513         memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
1514 }
1515
1516 int ip6_push_pending_frames(struct sock *sk)
1517 {
1518         struct sk_buff *skb, *tmp_skb;
1519         struct sk_buff **tail_skb;
1520         struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
1521         struct inet_sock *inet = inet_sk(sk);
1522         struct ipv6_pinfo *np = inet6_sk(sk);
1523         struct net *net = sock_net(sk);
1524         struct ipv6hdr *hdr;
1525         struct ipv6_txoptions *opt = np->cork.opt;
1526         struct rt6_info *rt = (struct rt6_info *)inet->cork.base.dst;
1527         struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
1528         unsigned char proto = fl6->flowi6_proto;
1529         int err = 0;
1530
1531         if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
1532                 goto out;
1533         tail_skb = &(skb_shinfo(skb)->frag_list);
1534
1535         /* move skb->data from the ext header up to the ip header */
1536         if (skb->data < skb_network_header(skb))
1537                 __skb_pull(skb, skb_network_offset(skb));
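             /*
              * Chain every remaining queued skb onto the first one's
              * frag_list.  The result goes out as a single skb; if it
              * exceeds the MTU, ip6_fragment() splits it again on
              * output (local_df permitting, see below).
              */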
1538         while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
1539                 __skb_pull(tmp_skb, skb_network_header_len(skb));
1540                 *tail_skb = tmp_skb;
1541                 tail_skb = &(tmp_skb->next);
1542                 skb->len += tmp_skb->len;
1543                 skb->data_len += tmp_skb->len;
1544                 skb->truesize += tmp_skb->truesize;
1545                 tmp_skb->destructor = NULL;
1546                 tmp_skb->sk = NULL;
1547         }
1548
1549         /* Allow local fragmentation. */
1550         if (np->pmtudisc < IPV6_PMTUDISC_DO)
1551                 skb->local_df = 1;
1552
1553         *final_dst = fl6->daddr;
1554         __skb_pull(skb, skb_network_header_len(skb));
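             /*
              * Push the extension headers back in front of the payload:
              * opt_flen covers the fragmentable part (trailing
              * destination options), opt_nflen the non-fragmentable
              * part (hop-by-hop, leading destination options, routing
              * header); the latter may also rewrite final_dst to the
              * routing header's first hop.
              */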
1555         if (opt && opt->opt_flen)
1556                 ipv6_push_frag_opts(skb, opt, &proto);
1557         if (opt && opt->opt_nflen)
1558                 ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);
1559
1560         skb_push(skb, sizeof(struct ipv6hdr));
1561         skb_reset_network_header(skb);
1562         hdr = ipv6_hdr(skb);
1563
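             /*
              * ip6_flow_hdr() fills in version, traffic class and flow
              * label; payload_len is computed later, in
              * __ip6_local_out().
              */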
1564         ip6_flow_hdr(hdr, np->cork.tclass, fl6->flowlabel);
1565         hdr->hop_limit = np->cork.hop_limit;
1566         hdr->nexthdr = proto;
1567         hdr->saddr = fl6->saddr;
1568         hdr->daddr = *final_dst;
1569
1570         skb->priority = sk->sk_priority;
1571         skb->mark = sk->sk_mark;
1572
1573         skb_dst_set(skb, dst_clone(&rt->dst));
1574         IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
1575         if (proto == IPPROTO_ICMPV6) {
1576                 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
1577
1578                 ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
1579                 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1580         }
1581
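             /*
              * ip6_local_out() may return positive qdisc NET_XMIT_*
              * codes as well as negative errnos; net_xmit_errno()
              * folds the former into an errno, with congestion
              * notification (NET_XMIT_CN) treated as success.
              */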
1582         err = ip6_local_out(skb);
1583         if (err) {
1584                 if (err > 0)
1585                         err = net_xmit_errno(err);
1586                 if (err)
1587                         goto error;
1588         }
1589
1590 out:
1591         ip6_cork_release(inet, np);
1592         return err;
1593 error:
1594         IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
1595         goto out;
1596 }
1597 EXPORT_SYMBOL_GPL(ip6_push_pending_frames);
1598
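/*
 * Discard anything still sitting on the cork queue, e.g. after an
 * ip6_append_data() error, then release the cork state.
 */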
1599 void ip6_flush_pending_frames(struct sock *sk)
1600 {
1601         struct sk_buff *skb;
1602
1603         while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
1604                 if (skb_dst(skb))
1605                         IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
1606                                       IPSTATS_MIB_OUTDISCARDS);
1607                 kfree_skb(skb);
1608         }
1609
1610         ip6_cork_release(inet_sk(sk), inet6_sk(sk));
1611 }
1612 EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);