2 * IPv6 output functions
3 * Linux INET6 implementation
6 * Pedro Roque <roque@di.fc.ul.pt>
8 * Based on linux/net/ipv4/ip_output.c
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
16 * A.N.Kuznetsov : arithmetic in fragmentation.
17 * extension headers are implemented.
18 * route changes now work.
19 * ip6_forward does not confuse sniffers.
22 * H. von Brand : Added missing #include <linux/string.h>
23 * Imran Patel : frag id should be in NBO
24 * Kazunori MIYAZAWA @USAGI
25 * : add ip6_append_data and related functions
29 #include <linux/errno.h>
30 #include <linux/kernel.h>
31 #include <linux/string.h>
32 #include <linux/socket.h>
33 #include <linux/net.h>
34 #include <linux/netdevice.h>
35 #include <linux/if_arp.h>
36 #include <linux/in6.h>
37 #include <linux/tcp.h>
38 #include <linux/route.h>
39 #include <linux/module.h>
40 #include <linux/slab.h>
42 #include <linux/netfilter.h>
43 #include <linux/netfilter_ipv6.h>
49 #include <net/ndisc.h>
50 #include <net/protocol.h>
51 #include <net/ip6_route.h>
52 #include <net/addrconf.h>
53 #include <net/rawv6.h>
56 #include <net/checksum.h>
57 #include <linux/mroute6.h>
59 static int ip6_finish_output2(struct sock *sk, struct sk_buff *skb)
61 struct dst_entry *dst = skb_dst(skb);
62 struct net_device *dev = dst->dev;
63 struct neighbour *neigh;
64 struct in6_addr *nexthop;
67 skb->protocol = htons(ETH_P_IPV6);
70 if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
71 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
73 if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(sk) &&
74 ((mroute6_socket(dev_net(dev), skb) &&
75 !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
76 ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
77 &ipv6_hdr(skb)->saddr))) {
78 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
80 /* Do not check for IFF_ALLMULTI; multicast routing
81 is not supported in any case.
84 NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
85 sk, newskb, NULL, newskb->dev,
88 if (ipv6_hdr(skb)->hop_limit == 0) {
89 IP6_INC_STATS(dev_net(dev), idev,
90 IPSTATS_MIB_OUTDISCARDS);
96 IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST,
99 if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
100 IPV6_ADDR_SCOPE_NODELOCAL &&
101 !(dev->flags & IFF_LOOPBACK)) {
108 nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
109 neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
110 if (unlikely(!neigh))
111 neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
112 if (!IS_ERR(neigh)) {
113 ret = dst_neigh_output(dst, neigh, skb);
114 rcu_read_unlock_bh();
117 rcu_read_unlock_bh();
119 IP6_INC_STATS(dev_net(dst->dev),
120 ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
125 static int ip6_finish_output(struct sock *sk, struct sk_buff *skb)
127 if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
128 dst_allfrag(skb_dst(skb)) ||
129 (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
130 return ip6_fragment(sk, skb, ip6_finish_output2);
132 return ip6_finish_output2(sk, skb);
135 int ip6_output(struct sock *sk, struct sk_buff *skb)
137 struct net_device *dev = skb_dst(skb)->dev;
138 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
139 if (unlikely(idev->cnf.disable_ipv6)) {
140 IP6_INC_STATS(dev_net(dev), idev,
141 IPSTATS_MIB_OUTDISCARDS);
146 return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, sk, skb,
149 !(IP6CB(skb)->flags & IP6SKB_REROUTED));
153 * xmit an sk_buff (used by TCP, SCTP and DCCP)
156 int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
157 struct ipv6_txoptions *opt, int tclass)
159 struct net *net = sock_net(sk);
160 struct ipv6_pinfo *np = inet6_sk(sk);
161 struct in6_addr *first_hop = &fl6->daddr;
162 struct dst_entry *dst = skb_dst(skb);
164 u8 proto = fl6->flowi6_proto;
165 int seg_len = skb->len;
170 unsigned int head_room;
172 /* First: exthdrs may take lots of space (~8K for now);
173 MAX_HEADER is not enough.
175 head_room = opt->opt_nflen + opt->opt_flen;
176 seg_len += head_room;
177 head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
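/*
 * Illustrative arithmetic (assumed example values, not from this file):
 * with extension headers totalling opt_nflen + opt_flen == 24 and an
 * Ethernet device, where LL_RESERVED_SPACE() commonly evaluates to 16,
 * head_room becomes 24 + sizeof(struct ipv6hdr) + 16 == 24 + 40 + 16 == 80
 * bytes that must be free in front of the existing skb data.
 */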
179 if (skb_headroom(skb) < head_room) {
180 struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
182 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
183 IPSTATS_MIB_OUTDISCARDS);
189 skb_set_owner_w(skb, sk);
192 ipv6_push_frag_opts(skb, opt, &proto);
194 ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
197 skb_push(skb, sizeof(struct ipv6hdr));
198 skb_reset_network_header(skb);
202 * Fill in the IPv6 header
205 hlimit = np->hop_limit;
207 hlimit = ip6_dst_hoplimit(dst);
209 ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
210 np->autoflowlabel, fl6));
212 hdr->payload_len = htons(seg_len);
213 hdr->nexthdr = proto;
214 hdr->hop_limit = hlimit;
216 hdr->saddr = fl6->saddr;
217 hdr->daddr = *first_hop;
219 skb->protocol = htons(ETH_P_IPV6);
220 skb->priority = sk->sk_priority;
221 skb->mark = sk->sk_mark;
224 if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
225 IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
226 IPSTATS_MIB_OUT, skb->len);
227 return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, sk, skb,
228 NULL, dst->dev, dst_output_sk);
232 ipv6_local_error(sk, EMSGSIZE, fl6, mtu);
233 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
237 EXPORT_SYMBOL(ip6_xmit);
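/*
 * Usage sketch (illustrative, not part of this file): a stream-protocol
 * caller hands ip6_xmit() an skb that already carries its transport
 * header and a dst, roughly:
 *
 *	err = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
 *
 * where np is the socket's ipv6_pinfo; the exact option and traffic-class
 * arguments are the caller's choice.
 */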
239 static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
241 struct ip6_ra_chain *ra;
242 struct sock *last = NULL;
244 read_lock(&ip6_ra_lock);
245 for (ra = ip6_ra_chain; ra; ra = ra->next) {
246 struct sock *sk = ra->sk;
247 if (sk && ra->sel == sel &&
248 (!sk->sk_bound_dev_if ||
249 sk->sk_bound_dev_if == skb->dev->ifindex)) {
251 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
253 rawv6_rcv(last, skb2);
260 rawv6_rcv(last, skb);
261 read_unlock(&ip6_ra_lock);
264 read_unlock(&ip6_ra_lock);
268 static int ip6_forward_proxy_check(struct sk_buff *skb)
270 struct ipv6hdr *hdr = ipv6_hdr(skb);
271 u8 nexthdr = hdr->nexthdr;
275 if (ipv6_ext_hdr(nexthdr)) {
276 offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
280 offset = sizeof(struct ipv6hdr);
282 if (nexthdr == IPPROTO_ICMPV6) {
283 struct icmp6hdr *icmp6;
285 if (!pskb_may_pull(skb, (skb_network_header(skb) +
286 offset + 1 - skb->data)))
289 icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);
291 switch (icmp6->icmp6_type) {
292 case NDISC_ROUTER_SOLICITATION:
293 case NDISC_ROUTER_ADVERTISEMENT:
294 case NDISC_NEIGHBOUR_SOLICITATION:
295 case NDISC_NEIGHBOUR_ADVERTISEMENT:
297 /* For a reaction involving a unicast neighbor discovery
298 * message destined to the proxied address, pass it to
308 * The proxying router can't forward traffic sent to a link-local
309 * address, so signal the sender and discard the packet. This
310 * behavior is clarified by the MIPv6 specification.
312 if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
313 dst_link_failure(skb);
320 static inline int ip6_forward_finish(struct sock *sk, struct sk_buff *skb)
322 skb_sender_cpu_clear(skb);
323 return dst_output_sk(sk, skb);
326 static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
329 struct inet6_dev *idev;
331 if (dst_metric_locked(dst, RTAX_MTU)) {
332 mtu = dst_metric_raw(dst, RTAX_MTU);
339 idev = __in6_dev_get(dst->dev);
341 mtu = idev->cnf.mtu6;
347 static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
352 /* ipv6 conntrack defrag sets max_frag_size + ignore_df */
353 if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
359 if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
365 int ip6_forward(struct sk_buff *skb)
367 struct dst_entry *dst = skb_dst(skb);
368 struct ipv6hdr *hdr = ipv6_hdr(skb);
369 struct inet6_skb_parm *opt = IP6CB(skb);
370 struct net *net = dev_net(dst->dev);
373 if (net->ipv6.devconf_all->forwarding == 0)
376 if (skb->pkt_type != PACKET_HOST)
379 if (unlikely(skb->sk))
382 if (skb_warn_if_lro(skb))
385 if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
386 IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
387 IPSTATS_MIB_INDISCARDS);
391 skb_forward_csum(skb);
394 * We do NOT do any processing on
395 * RA packets, pushing them to user level AS IS
396 * without any guarantee that the application will be able
397 * to interpret them. The reason is that we
398 * cannot do anything clever here.
400 * We are not an end node, so if the packet contains
401 * AH/ESP we cannot do anything.
402 * Defragmentation would also be a mistake; RA packets
403 * cannot be fragmented, because there is no guarantee
404 * that different fragments will go along one path. --ANK
406 if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
407 if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
412 * check and decrement hop limit
414 if (hdr->hop_limit <= 1) {
415 /* Force OUTPUT device used as source address */
417 icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
418 IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
419 IPSTATS_MIB_INHDRERRORS);
425 /* XXX: idev->cnf.proxy_ndp? */
426 if (net->ipv6.devconf_all->proxy_ndp &&
427 pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
428 int proxied = ip6_forward_proxy_check(skb);
430 return ip6_input(skb);
431 else if (proxied < 0) {
432 IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
433 IPSTATS_MIB_INDISCARDS);
438 if (!xfrm6_route_forward(skb)) {
439 IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
440 IPSTATS_MIB_INDISCARDS);
445 /* IPv6 specs say nothing about it, but it is clear that we cannot
446 send redirects to source routed frames.
447 We don't send redirects to frames decapsulated from IPsec.
449 if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
450 struct in6_addr *target = NULL;
451 struct inet_peer *peer;
455 * incoming and outgoing devices are the same
459 rt = (struct rt6_info *) dst;
460 if (rt->rt6i_flags & RTF_GATEWAY)
461 target = &rt->rt6i_gateway;
463 target = &hdr->daddr;
465 peer = inet_getpeer_v6(net->ipv6.peers, &hdr->daddr, 1);
467 /* Limit redirects both by destination (here)
468 and by source (inside ndisc_send_redirect)
470 if (inet_peer_xrlim_allow(peer, 1*HZ))
471 ndisc_send_redirect(skb, target);
475 int addrtype = ipv6_addr_type(&hdr->saddr);
477 /* This check is security critical. */
478 if (addrtype == IPV6_ADDR_ANY ||
479 addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
481 if (addrtype & IPV6_ADDR_LINKLOCAL) {
482 icmpv6_send(skb, ICMPV6_DEST_UNREACH,
483 ICMPV6_NOT_NEIGHBOUR, 0);
488 mtu = ip6_dst_mtu_forward(dst);
489 if (mtu < IPV6_MIN_MTU)
492 if (ip6_pkt_too_big(skb, mtu)) {
493 /* Again, force OUTPUT device used as source address */
495 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
496 IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
497 IPSTATS_MIB_INTOOBIGERRORS);
498 IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
499 IPSTATS_MIB_FRAGFAILS);
504 if (skb_cow(skb, dst->dev->hard_header_len)) {
505 IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
506 IPSTATS_MIB_OUTDISCARDS);
512 /* Mangling the hop limit is delayed until after the skb COW */
516 IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
517 IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
518 return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, NULL, skb,
523 IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
529 static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
531 to->pkt_type = from->pkt_type;
532 to->priority = from->priority;
533 to->protocol = from->protocol;
535 skb_dst_set(to, dst_clone(skb_dst(from)));
537 to->mark = from->mark;
539 #ifdef CONFIG_NET_SCHED
540 to->tc_index = from->tc_index;
543 skb_copy_secmark(to, from);
546 int ip6_fragment(struct sock *sk, struct sk_buff *skb,
547 int (*output)(struct sock *, struct sk_buff *))
549 struct sk_buff *frag;
550 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
551 struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
552 inet6_sk(skb->sk) : NULL;
553 struct ipv6hdr *tmp_hdr;
555 unsigned int mtu, hlen, left, len;
558 int ptr, offset = 0, err = 0;
559 u8 *prevhdr, nexthdr = 0;
560 struct net *net = dev_net(skb_dst(skb)->dev);
562 hlen = ip6_find_1stfragopt(skb, &prevhdr);
565 mtu = ip6_skb_dst_mtu(skb);
567 /* We must not fragment if the socket is set to force MTU discovery
568 * or if the skb is not generated by a local socket.
570 if (unlikely(!skb->ignore_df && skb->len > mtu))
573 if (IP6CB(skb)->frag_max_size) {
574 if (IP6CB(skb)->frag_max_size > mtu)
577 /* don't send fragments larger than what we received */
578 mtu = IP6CB(skb)->frag_max_size;
579 if (mtu < IPV6_MIN_MTU)
583 if (np && np->frag_size < mtu) {
587 mtu -= hlen + sizeof(struct frag_hdr);
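/*
 * Illustrative arithmetic (assumed example values): with a path MTU of
 * 1500 and only the basic IPv6 header (hlen == 40), the line above leaves
 * 1500 - 40 - 8 == 1452 octets of payload budget per fragment; non-final
 * fragments are further rounded down to a multiple of 8, i.e. 1448 octets.
 */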
589 frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
590 &ipv6_hdr(skb)->saddr);
592 hroom = LL_RESERVED_SPACE(rt->dst.dev);
593 if (skb_has_frag_list(skb)) {
594 int first_len = skb_pagelen(skb);
595 struct sk_buff *frag2;
597 if (first_len - hlen > mtu ||
598 ((first_len - hlen) & 7) ||
600 skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
603 skb_walk_frags(skb, frag) {
604 /* Correct geometry. */
605 if (frag->len > mtu ||
606 ((frag->len & 7) && frag->next) ||
607 skb_headroom(frag) < (hlen + hroom + sizeof(struct frag_hdr)))
608 goto slow_path_clean;
610 /* Partially cloned skb? */
611 if (skb_shared(frag))
612 goto slow_path_clean;
617 frag->destructor = sock_wfree;
619 skb->truesize -= frag->truesize;
626 *prevhdr = NEXTHDR_FRAGMENT;
627 tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
629 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
630 IPSTATS_MIB_FRAGFAILS);
634 frag = skb_shinfo(skb)->frag_list;
635 skb_frag_list_init(skb);
637 __skb_pull(skb, hlen);
638 fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
639 __skb_push(skb, hlen);
640 skb_reset_network_header(skb);
641 memcpy(skb_network_header(skb), tmp_hdr, hlen);
643 fh->nexthdr = nexthdr;
645 fh->frag_off = htons(IP6_MF);
646 fh->identification = frag_id;
648 first_len = skb_pagelen(skb);
649 skb->data_len = first_len - skb_headlen(skb);
650 skb->len = first_len;
651 ipv6_hdr(skb)->payload_len = htons(first_len -
652 sizeof(struct ipv6hdr));
657 /* Prepare the header of the next frame
658 * before the previous one is sent. */
660 frag->ip_summed = CHECKSUM_NONE;
661 skb_reset_transport_header(frag);
662 fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
663 __skb_push(frag, hlen);
664 skb_reset_network_header(frag);
665 memcpy(skb_network_header(frag), tmp_hdr,
667 offset += skb->len - hlen - sizeof(struct frag_hdr);
668 fh->nexthdr = nexthdr;
670 fh->frag_off = htons(offset);
672 fh->frag_off |= htons(IP6_MF);
673 fh->identification = frag_id;
674 ipv6_hdr(frag)->payload_len =
676 sizeof(struct ipv6hdr));
677 ip6_copy_metadata(frag, skb);
680 err = output(sk, skb);
682 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
683 IPSTATS_MIB_FRAGCREATES);
696 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
697 IPSTATS_MIB_FRAGOKS);
702 kfree_skb_list(frag);
704 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
705 IPSTATS_MIB_FRAGFAILS);
710 skb_walk_frags(skb, frag2) {
714 frag2->destructor = NULL;
715 skb->truesize += frag2->truesize;
720 if ((skb->ip_summed == CHECKSUM_PARTIAL) &&
721 skb_checksum_help(skb))
724 left = skb->len - hlen; /* Space per frame */
725 ptr = hlen; /* Where to start from */
728 * Fragment the datagram.
731 *prevhdr = NEXTHDR_FRAGMENT;
732 troom = rt->dst.dev->needed_tailroom;
735 * Keep copying data until we run out.
739 /* IF: it doesn't fit, use 'mtu' - the data space left */
742 /* IF: we are not sending up to and including the packet end,
743 then align the next start on an eight-byte boundary */
748 /* Allocate buffer */
749 frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
750 hroom + troom, GFP_ATOMIC);
752 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
753 IPSTATS_MIB_FRAGFAILS);
759 * Set up data on packet
762 ip6_copy_metadata(frag, skb);
763 skb_reserve(frag, hroom);
764 skb_put(frag, len + hlen + sizeof(struct frag_hdr));
765 skb_reset_network_header(frag);
766 fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
767 frag->transport_header = (frag->network_header + hlen +
768 sizeof(struct frag_hdr));
771 * Charge the memory for the fragment to any owner
775 skb_set_owner_w(frag, skb->sk);
778 * Copy the packet header into the new buffer.
780 skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);
783 * Build fragment header.
785 fh->nexthdr = nexthdr;
787 fh->identification = frag_id;
790 * Copy a block of the IP datagram.
792 BUG_ON(skb_copy_bits(skb, ptr, skb_transport_header(frag),
796 fh->frag_off = htons(offset);
798 fh->frag_off |= htons(IP6_MF);
799 ipv6_hdr(frag)->payload_len = htons(frag->len -
800 sizeof(struct ipv6hdr));
806 * Put this fragment into the sending queue.
808 err = output(sk, frag);
812 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
813 IPSTATS_MIB_FRAGCREATES);
815 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
816 IPSTATS_MIB_FRAGOKS);
821 if (skb->sk && dst_allfrag(skb_dst(skb)))
822 sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);
824 skb->dev = skb_dst(skb)->dev;
825 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
829 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
830 IPSTATS_MIB_FRAGFAILS);
835 static inline int ip6_rt_check(const struct rt6key *rt_key,
836 const struct in6_addr *fl_addr,
837 const struct in6_addr *addr_cache)
839 return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
840 (!addr_cache || !ipv6_addr_equal(fl_addr, addr_cache));
843 static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
844 struct dst_entry *dst,
845 const struct flowi6 *fl6)
847 struct ipv6_pinfo *np = inet6_sk(sk);
853 if (dst->ops->family != AF_INET6) {
858 rt = (struct rt6_info *)dst;
859 /* Yes, checking route validity in the unconnected
860 * case is not very simple. Take into account
861 * that we do not support routing by source, TOS,
862 * and MSG_DONTROUTE --ANK (980726)
864 * 1. ip6_rt_check(): If the route was a host route,
865 * check that the cached destination is current.
866 * If it is a network route, we still may
867 * check its validity using the saved pointer
868 * to the last used address: daddr_cache.
869 * We do not want to save the whole address now,
870 * (because the main consumer of this service
871 * is tcp, which does not have this problem),
872 * so the last trick works only on connected sockets.
874 * 2. oif also should be the same.
876 if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
877 #ifdef CONFIG_IPV6_SUBTREES
878 ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
880 (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
889 static int ip6_dst_lookup_tail(struct net *net, struct sock *sk,
890 struct dst_entry **dst, struct flowi6 *fl6)
892 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
898 /* The correct way to handle this would be to do
899 * ip6_route_get_saddr, and then ip6_route_output; however,
900 * the route-specific preferred source forces the
901 * ip6_route_output call _before_ ip6_route_get_saddr.
903 * In source specific routing (no src=any default route),
904 * ip6_route_output will fail given src=any saddr, though, so
905 * that's why we try it again later.
907 if (ipv6_addr_any(&fl6->saddr) && (!*dst || !(*dst)->error)) {
909 bool had_dst = *dst != NULL;
912 *dst = ip6_route_output(net, sk, fl6);
913 rt = (*dst)->error ? NULL : (struct rt6_info *)*dst;
914 err = ip6_route_get_saddr(net, rt, &fl6->daddr,
915 sk ? inet6_sk(sk)->srcprefs : 0,
918 goto out_err_release;
920 /* If we had an erroneous initial result, pretend it
921 * never existed and let the SA-enabled version take
924 if (!had_dst && (*dst)->error) {
931 *dst = ip6_route_output(net, sk, fl6);
935 goto out_err_release;
937 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
939 * Here if the dst entry we've looked up
940 * has a neighbour entry that is in the INCOMPLETE
941 * state and the src address from the flow is
942 * marked as OPTIMISTIC, we release the found
943 * dst entry and replace it with the
944 * dst entry of the nexthop router
946 rt = (struct rt6_info *) *dst;
948 n = __ipv6_neigh_lookup_noref(rt->dst.dev,
949 rt6_nexthop(rt, &fl6->daddr));
950 err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
951 rcu_read_unlock_bh();
954 struct inet6_ifaddr *ifp;
955 struct flowi6 fl_gw6;
958 ifp = ipv6_get_ifaddr(net, &fl6->saddr,
961 redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
967 * We need to get the dst entry for the
968 * default router instead
971 memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
972 memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
973 *dst = ip6_route_output(net, sk, &fl_gw6);
976 goto out_err_release;
984 if (err == -ENETUNREACH)
985 IP6_INC_STATS(net, NULL, IPSTATS_MIB_OUTNOROUTES);
992 * ip6_dst_lookup - perform route lookup on flow
993 * @sk: socket which provides route info
994 * @dst: pointer to dst_entry * for result
995 * @fl6: flow to lookup
997 * This function performs a route lookup on the given flow.
999 * It returns zero on success, or a standard errno code on error.
1001 int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
1005 return ip6_dst_lookup_tail(net, sk, dst, fl6);
1007 EXPORT_SYMBOL_GPL(ip6_dst_lookup);
1010 * ip6_dst_lookup_flow - perform route lookup on flow with ipsec
1011 * @sk: socket which provides route info
1012 * @fl6: flow to lookup
1013 * @final_dst: final destination address for ipsec lookup
1015 * This function performs a route lookup on the given flow.
1017 * It returns a valid dst pointer on success, or a pointer encoded
1020 struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
1021 const struct in6_addr *final_dst)
1023 struct dst_entry *dst = NULL;
1026 err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
1028 return ERR_PTR(err);
1030 fl6->daddr = *final_dst;
1031 if (!fl6->flowi6_oif)
1032 fl6->flowi6_oif = dst->dev->ifindex;
1034 return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
1036 EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
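/*
 * Usage sketch (illustrative, not part of this file): callers must check
 * the result with IS_ERR(), not for NULL, e.g.
 *
 *	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
 *	if (IS_ERR(dst))
 *		return PTR_ERR(dst);
 *
 * final_p stands in for the caller's optional final destination used for
 * the IPsec lookup.
 */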
1039 * ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
1040 * @sk: socket which provides the dst cache and route info
1041 * @fl6: flow to lookup
1042 * @final_dst: final destination address for ipsec lookup
1044 * This function performs a route lookup on the given flow with the
1045 * possibility of using the cached route in the socket if it is valid.
1046 * It will take the socket dst lock when operating on the dst cache.
1047 * As a result, this function can only be used in process context.
1049 * It returns a valid dst pointer on success, or a pointer encoded
1052 struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
1053 const struct in6_addr *final_dst)
1055 struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
1058 dst = ip6_sk_dst_check(sk, dst, fl6);
1060 err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
1062 return ERR_PTR(err);
1064 fl6->daddr = *final_dst;
1066 return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
1068 EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
1070 static inline int ip6_ufo_append_data(struct sock *sk,
1071 struct sk_buff_head *queue,
1072 int getfrag(void *from, char *to, int offset, int len,
1073 int odd, struct sk_buff *skb),
1074 void *from, int length, int hh_len, int fragheaderlen,
1075 int transhdrlen, int mtu, unsigned int flags,
1076 const struct flowi6 *fl6)
1079 struct sk_buff *skb;
1082 /* There is support for UDP large send offload by the network
1083 * device, so create a single skb containing the complete
1086 skb = skb_peek_tail(queue);
1088 skb = sock_alloc_send_skb(sk,
1089 hh_len + fragheaderlen + transhdrlen + 20,
1090 (flags & MSG_DONTWAIT), &err);
1094 /* reserve space for Hardware header */
1095 skb_reserve(skb, hh_len);
1097 /* create space for UDP/IP header */
1098 skb_put(skb, fragheaderlen + transhdrlen);
1100 /* initialize network header pointer */
1101 skb_reset_network_header(skb);
1103 /* initialize protocol header pointer */
1104 skb->transport_header = skb->network_header + fragheaderlen;
1106 skb->protocol = htons(ETH_P_IPV6);
1109 __skb_queue_tail(queue, skb);
1110 } else if (skb_is_gso(skb)) {
1114 skb->ip_summed = CHECKSUM_PARTIAL;
1115 /* Specify the length of each IPv6 datagram fragment.
1116 * It has to be a multiple of 8.
1118 skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
1119 sizeof(struct frag_hdr)) & ~7;
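/*
 * Illustrative arithmetic (assumed example values): with mtu == 1500 and
 * fragheaderlen == 40 (plain IPv6 header, no extension headers),
 * gso_size == (1500 - 40 - 8) & ~7 == 1448, so each fragment produced
 * here carries 1448 payload octets, a multiple of 8 as required above.
 */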
1120 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1121 skb_shinfo(skb)->ip6_frag_id = ipv6_select_ident(sock_net(sk),
1126 return skb_append_datato_frags(sk, skb, getfrag, from,
1127 (length - transhdrlen));
1130 static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
1133 return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
1136 static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
1139 return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
1142 static void ip6_append_data_mtu(unsigned int *mtu,
1144 unsigned int fragheaderlen,
1145 struct sk_buff *skb,
1146 struct rt6_info *rt,
1147 unsigned int orig_mtu)
1149 if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
1151 /* first fragment, reserve header_len */
1152 *mtu = orig_mtu - rt->dst.header_len;
1156 * this fragment is not the first; the header
1157 * space is regarded as data space.
1161 *maxfraglen = ((*mtu - fragheaderlen) & ~7)
1162 + fragheaderlen - sizeof(struct frag_hdr);
1166 static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
1167 struct inet6_cork *v6_cork,
1168 int hlimit, int tclass, struct ipv6_txoptions *opt,
1169 struct rt6_info *rt, struct flowi6 *fl6)
1171 struct ipv6_pinfo *np = inet6_sk(sk);
1178 if (WARN_ON(v6_cork->opt))
1181 v6_cork->opt = kzalloc(opt->tot_len, sk->sk_allocation);
1182 if (unlikely(!v6_cork->opt))
1185 v6_cork->opt->tot_len = opt->tot_len;
1186 v6_cork->opt->opt_flen = opt->opt_flen;
1187 v6_cork->opt->opt_nflen = opt->opt_nflen;
1189 v6_cork->opt->dst0opt = ip6_opt_dup(opt->dst0opt,
1191 if (opt->dst0opt && !v6_cork->opt->dst0opt)
1194 v6_cork->opt->dst1opt = ip6_opt_dup(opt->dst1opt,
1196 if (opt->dst1opt && !v6_cork->opt->dst1opt)
1199 v6_cork->opt->hopopt = ip6_opt_dup(opt->hopopt,
1201 if (opt->hopopt && !v6_cork->opt->hopopt)
1204 v6_cork->opt->srcrt = ip6_rthdr_dup(opt->srcrt,
1206 if (opt->srcrt && !v6_cork->opt->srcrt)
1209 /* need source address above miyazawa */
1212 cork->base.dst = &rt->dst;
1213 cork->fl.u.ip6 = *fl6;
1214 v6_cork->hop_limit = hlimit;
1215 v6_cork->tclass = tclass;
1216 if (rt->dst.flags & DST_XFRM_TUNNEL)
1217 mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
1218 rt->dst.dev->mtu : dst_mtu(&rt->dst);
1220 mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
1221 rt->dst.dev->mtu : dst_mtu(rt->dst.path);
1222 if (np->frag_size < mtu) {
1224 mtu = np->frag_size;
1226 cork->base.fragsize = mtu;
1227 if (dst_allfrag(rt->dst.path))
1228 cork->base.flags |= IPCORK_ALLFRAG;
1229 cork->base.length = 0;
1234 static int __ip6_append_data(struct sock *sk,
1236 struct sk_buff_head *queue,
1237 struct inet_cork *cork,
1238 struct inet6_cork *v6_cork,
1239 struct page_frag *pfrag,
1240 int getfrag(void *from, char *to, int offset,
1241 int len, int odd, struct sk_buff *skb),
1242 void *from, int length, int transhdrlen,
1243 unsigned int flags, int dontfrag)
1245 struct sk_buff *skb, *skb_prev = NULL;
1246 unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
1248 int dst_exthdrlen = 0;
1255 struct rt6_info *rt = (struct rt6_info *)cork->dst;
1256 struct ipv6_txoptions *opt = v6_cork->opt;
1257 int csummode = CHECKSUM_NONE;
1259 skb = skb_peek_tail(queue);
1261 exthdrlen = opt ? opt->opt_flen : 0;
1262 dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
1265 mtu = cork->fragsize;
1268 hh_len = LL_RESERVED_SPACE(rt->dst.dev);
1270 fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
1271 (opt ? opt->opt_nflen : 0);
1272 maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
1273 sizeof(struct frag_hdr);
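/*
 * Illustrative arithmetic (assumed example values): with mtu == 1500 and
 * fragheaderlen == 40, maxfraglen == ((1500 - 40) & ~7) + 40 - 8 == 1488,
 * i.e. each fragment may hold 1448 data octets after the 8-byte fragment
 * header, for 1496 octets on the wire, which still fits the MTU.
 */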
1275 if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
1276 unsigned int maxnonfragsize, headersize;
1278 headersize = sizeof(struct ipv6hdr) +
1279 (opt ? opt->opt_flen + opt->opt_nflen : 0) +
1280 (dst_allfrag(&rt->dst) ?
1281 sizeof(struct frag_hdr) : 0) +
1282 rt->rt6i_nfheader_len;
1284 if (ip6_sk_ignore_df(sk))
1285 maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
1287 maxnonfragsize = mtu;
1289 /* dontfrag active */
1290 if ((cork->length + length > mtu - headersize) && dontfrag &&
1291 (sk->sk_protocol == IPPROTO_UDP ||
1292 sk->sk_protocol == IPPROTO_RAW)) {
1293 ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
1294 sizeof(struct ipv6hdr));
1298 if (cork->length + length > maxnonfragsize - headersize) {
1300 ipv6_local_error(sk, EMSGSIZE, fl6,
1302 sizeof(struct ipv6hdr));
1307 if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_RAW) {
1308 sock_tx_timestamp(sk, &tx_flags);
1309 if (tx_flags & SKBTX_ANY_SW_TSTAMP &&
1310 sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
1311 tskey = sk->sk_tskey++;
1314 /* If this is the first and only packet and device
1315 * supports checksum offloading, let's use it.
1316 * Use transhdrlen, same as IPv4, because partial
1317 * sums only work when transhdrlen is set.
1319 if (transhdrlen && sk->sk_protocol == IPPROTO_UDP &&
1320 length + fragheaderlen < mtu &&
1321 rt->dst.dev->features & NETIF_F_V6_CSUM &&
1323 csummode = CHECKSUM_PARTIAL;
1325 * Let's try using as much space as possible.
1326 * Use MTU if total length of the message fits into the MTU.
1327 * Otherwise, we need to reserve fragment header and
1328 * fragment alignment (= 8-15 octets, in total).
1330 * Note that we may need to "move" the data from the tail
1331 * of the buffer to the new fragment when we split
1334 * FIXME: It may be fragmented into multiple chunks
1335 * at once if non-fragmentable extension headers
1340 cork->length += length;
1341 if (((length > mtu) ||
1342 (skb && skb_is_gso(skb))) &&
1343 (sk->sk_protocol == IPPROTO_UDP) &&
1344 (rt->dst.dev->features & NETIF_F_UFO) &&
1345 (sk->sk_type == SOCK_DGRAM)) {
1346 err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
1347 hh_len, fragheaderlen,
1348 transhdrlen, mtu, flags, fl6);
1357 while (length > 0) {
1358 /* Check if the remaining data fits into current packet. */
1359 copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
1361 copy = maxfraglen - skb->len;
1365 unsigned int datalen;
1366 unsigned int fraglen;
1367 unsigned int fraggap;
1368 unsigned int alloclen;
1370 /* There's no room in the current skb */
1372 fraggap = skb->len - maxfraglen;
1375 /* update mtu and maxfraglen if necessary */
1376 if (!skb || !skb_prev)
1377 ip6_append_data_mtu(&mtu, &maxfraglen,
1378 fragheaderlen, skb, rt,
1384 * If remaining data exceeds the mtu,
1385 * we know we need more fragment(s).
1387 datalen = length + fraggap;
1389 if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
1390 datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
1391 if ((flags & MSG_MORE) &&
1392 !(rt->dst.dev->features&NETIF_F_SG))
1395 alloclen = datalen + fragheaderlen;
1397 alloclen += dst_exthdrlen;
1399 if (datalen != length + fraggap) {
1401 * this is not the last fragment; the trailer
1402 * space is regarded as data space.
1404 datalen += rt->dst.trailer_len;
1407 alloclen += rt->dst.trailer_len;
1408 fraglen = datalen + fragheaderlen;
1411 * We just reserve space for the fragment header.
1412 * Note: this may be an overallocation if the message
1413 * (without MSG_MORE) fits into the MTU.
1415 alloclen += sizeof(struct frag_hdr);
1418 skb = sock_alloc_send_skb(sk,
1420 (flags & MSG_DONTWAIT), &err);
1423 if (atomic_read(&sk->sk_wmem_alloc) <=
1425 skb = sock_wmalloc(sk,
1426 alloclen + hh_len, 1,
1434 * Fill in the control structures
1436 skb->protocol = htons(ETH_P_IPV6);
1437 skb->ip_summed = csummode;
1439 /* reserve for fragmentation and ipsec header */
1440 skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
1443 /* Only the initial fragment is time stamped */
1444 skb_shinfo(skb)->tx_flags = tx_flags;
1446 skb_shinfo(skb)->tskey = tskey;
1450 * Find where to start putting bytes
1452 data = skb_put(skb, fraglen);
1453 skb_set_network_header(skb, exthdrlen);
1454 data += fragheaderlen;
1455 skb->transport_header = (skb->network_header +
1458 skb->csum = skb_copy_and_csum_bits(
1459 skb_prev, maxfraglen,
1460 data + transhdrlen, fraggap, 0);
1461 skb_prev->csum = csum_sub(skb_prev->csum,
1464 pskb_trim_unique(skb_prev, maxfraglen);
1466 copy = datalen - transhdrlen - fraggap;
1472 } else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
1479 length -= datalen - fraggap;
1485 * Put the packet on the pending queue
1487 __skb_queue_tail(queue, skb);
1494 if (!(rt->dst.dev->features&NETIF_F_SG)) {
1498 if (getfrag(from, skb_put(skb, copy),
1499 offset, copy, off, skb) < 0) {
1500 __skb_trim(skb, off);
1505 int i = skb_shinfo(skb)->nr_frags;
1508 if (!sk_page_frag_refill(sk, pfrag))
1511 if (!skb_can_coalesce(skb, i, pfrag->page,
1514 if (i == MAX_SKB_FRAGS)
1517 __skb_fill_page_desc(skb, i, pfrag->page,
1519 skb_shinfo(skb)->nr_frags = ++i;
1520 get_page(pfrag->page);
1522 copy = min_t(int, copy, pfrag->size - pfrag->offset);
1524 page_address(pfrag->page) + pfrag->offset,
1525 offset, copy, skb->len, skb) < 0)
1528 pfrag->offset += copy;
1529 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1531 skb->data_len += copy;
1532 skb->truesize += copy;
1533 atomic_add(copy, &sk->sk_wmem_alloc);
1544 cork->length -= length;
1545 IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
1549 int ip6_append_data(struct sock *sk,
1550 int getfrag(void *from, char *to, int offset, int len,
1551 int odd, struct sk_buff *skb),
1552 void *from, int length, int transhdrlen, int hlimit,
1553 int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
1554 struct rt6_info *rt, unsigned int flags, int dontfrag)
1556 struct inet_sock *inet = inet_sk(sk);
1557 struct ipv6_pinfo *np = inet6_sk(sk);
1561 if (flags&MSG_PROBE)
1563 if (skb_queue_empty(&sk->sk_write_queue)) {
1567 err = ip6_setup_cork(sk, &inet->cork, &np->cork, hlimit,
1568 tclass, opt, rt, fl6);
1572 exthdrlen = (opt ? opt->opt_flen : 0);
1573 length += exthdrlen;
1574 transhdrlen += exthdrlen;
1576 fl6 = &inet->cork.fl.u.ip6;
1580 return __ip6_append_data(sk, fl6, &sk->sk_write_queue, &inet->cork.base,
1581 &np->cork, sk_page_frag(sk), getfrag,
1582 from, length, transhdrlen, flags, dontfrag);
1584 EXPORT_SYMBOL_GPL(ip6_append_data);
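/*
 * Usage sketch (illustrative, not part of this file): datagram senders
 * typically append and then push or flush, roughly:
 *
 *	err = ip6_append_data(sk, getfrag, msg, len, transhdrlen, hlimit,
 *			      tclass, opt, &fl6, rt, msg->msg_flags, dontfrag);
 *	if (err)
 *		ip6_flush_pending_frames(sk);
 *	else if (!corked)
 *		err = ip6_push_pending_frames(sk);
 *
 * msg, corked and dontfrag stand in for caller-held state; see the UDPv6
 * and raw socket sendmsg paths for the real sequences.
 */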
1586 static void ip6_cork_release(struct inet_cork_full *cork,
1587 struct inet6_cork *v6_cork)
1590 kfree(v6_cork->opt->dst0opt);
1591 kfree(v6_cork->opt->dst1opt);
1592 kfree(v6_cork->opt->hopopt);
1593 kfree(v6_cork->opt->srcrt);
1594 kfree(v6_cork->opt);
1595 v6_cork->opt = NULL;
1598 if (cork->base.dst) {
1599 dst_release(cork->base.dst);
1600 cork->base.dst = NULL;
1601 cork->base.flags &= ~IPCORK_ALLFRAG;
1603 memset(&cork->fl, 0, sizeof(cork->fl));
1606 struct sk_buff *__ip6_make_skb(struct sock *sk,
1607 struct sk_buff_head *queue,
1608 struct inet_cork_full *cork,
1609 struct inet6_cork *v6_cork)
1611 struct sk_buff *skb, *tmp_skb;
1612 struct sk_buff **tail_skb;
1613 struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
1614 struct ipv6_pinfo *np = inet6_sk(sk);
1615 struct net *net = sock_net(sk);
1616 struct ipv6hdr *hdr;
1617 struct ipv6_txoptions *opt = v6_cork->opt;
1618 struct rt6_info *rt = (struct rt6_info *)cork->base.dst;
1619 struct flowi6 *fl6 = &cork->fl.u.ip6;
1620 unsigned char proto = fl6->flowi6_proto;
1622 skb = __skb_dequeue(queue);
1625 tail_skb = &(skb_shinfo(skb)->frag_list);
1627 /* move skb->data to ip header from ext header */
1628 if (skb->data < skb_network_header(skb))
1629 __skb_pull(skb, skb_network_offset(skb));
1630 while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
1631 __skb_pull(tmp_skb, skb_network_header_len(skb));
1632 *tail_skb = tmp_skb;
1633 tail_skb = &(tmp_skb->next);
1634 skb->len += tmp_skb->len;
1635 skb->data_len += tmp_skb->len;
1636 skb->truesize += tmp_skb->truesize;
1637 tmp_skb->destructor = NULL;
1641 /* Allow local fragmentation. */
1642 skb->ignore_df = ip6_sk_ignore_df(sk);
1644 *final_dst = fl6->daddr;
1645 __skb_pull(skb, skb_network_header_len(skb));
1646 if (opt && opt->opt_flen)
1647 ipv6_push_frag_opts(skb, opt, &proto);
1648 if (opt && opt->opt_nflen)
1649 ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);
1651 skb_push(skb, sizeof(struct ipv6hdr));
1652 skb_reset_network_header(skb);
1653 hdr = ipv6_hdr(skb);
1655 ip6_flow_hdr(hdr, v6_cork->tclass,
1656 ip6_make_flowlabel(net, skb, fl6->flowlabel,
1657 np->autoflowlabel, fl6));
1658 hdr->hop_limit = v6_cork->hop_limit;
1659 hdr->nexthdr = proto;
1660 hdr->saddr = fl6->saddr;
1661 hdr->daddr = *final_dst;
1663 skb->priority = sk->sk_priority;
1664 skb->mark = sk->sk_mark;
1666 skb_dst_set(skb, dst_clone(&rt->dst));
1667 IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
1668 if (proto == IPPROTO_ICMPV6) {
1669 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
1671 ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
1672 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1675 ip6_cork_release(cork, v6_cork);
1680 int ip6_send_skb(struct sk_buff *skb)
1682 struct net *net = sock_net(skb->sk);
1683 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
1686 err = ip6_local_out(skb);
1689 err = net_xmit_errno(err);
1691 IP6_INC_STATS(net, rt->rt6i_idev,
1692 IPSTATS_MIB_OUTDISCARDS);
1698 int ip6_push_pending_frames(struct sock *sk)
1700 struct sk_buff *skb;
1702 skb = ip6_finish_skb(sk);
1706 return ip6_send_skb(skb);
1708 EXPORT_SYMBOL_GPL(ip6_push_pending_frames);
1710 static void __ip6_flush_pending_frames(struct sock *sk,
1711 struct sk_buff_head *queue,
1712 struct inet_cork_full *cork,
1713 struct inet6_cork *v6_cork)
1715 struct sk_buff *skb;
1717 while ((skb = __skb_dequeue_tail(queue)) != NULL) {
1719 IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
1720 IPSTATS_MIB_OUTDISCARDS);
1724 ip6_cork_release(cork, v6_cork);
1727 void ip6_flush_pending_frames(struct sock *sk)
1729 __ip6_flush_pending_frames(sk, &sk->sk_write_queue,
1730 &inet_sk(sk)->cork, &inet6_sk(sk)->cork);
1732 EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);
1734 struct sk_buff *ip6_make_skb(struct sock *sk,
1735 int getfrag(void *from, char *to, int offset,
1736 int len, int odd, struct sk_buff *skb),
1737 void *from, int length, int transhdrlen,
1738 int hlimit, int tclass,
1739 struct ipv6_txoptions *opt, struct flowi6 *fl6,
1740 struct rt6_info *rt, unsigned int flags,
1743 struct inet_cork_full cork;
1744 struct inet6_cork v6_cork;
1745 struct sk_buff_head queue;
1746 int exthdrlen = (opt ? opt->opt_flen : 0);
1749 if (flags & MSG_PROBE)
1752 __skb_queue_head_init(&queue);
1754 cork.base.flags = 0;
1756 cork.base.opt = NULL;
1758 err = ip6_setup_cork(sk, &cork, &v6_cork, hlimit, tclass, opt, rt, fl6);
1760 return ERR_PTR(err);
1763 dontfrag = inet6_sk(sk)->dontfrag;
1765 err = __ip6_append_data(sk, fl6, &queue, &cork.base, &v6_cork,
1766 &current->task_frag, getfrag, from,
1767 length + exthdrlen, transhdrlen + exthdrlen,
1770 __ip6_flush_pending_frames(sk, &queue, &cork, &v6_cork);
1771 return ERR_PTR(err);
1774 return __ip6_make_skb(sk, &queue, &cork, &v6_cork);
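/*
 * Usage sketch (illustrative, not part of this file): the uncorked fast
 * path builds and sends a datagram in one shot, roughly:
 *
 *	skb = ip6_make_skb(sk, getfrag, msg, len, transhdrlen, hlimit,
 *			   tclass, opt, &fl6, rt, msg->msg_flags, dontfrag);
 *	if (!IS_ERR_OR_NULL(skb))
 *		err = ip6_send_skb(skb);
 *
 * msg and dontfrag again stand in for caller-held state.
 */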