2 * IPv6 output functions
3 * Linux INET6 implementation
6 * Pedro Roque <roque@di.fc.ul.pt>
8 * Based on linux/net/ipv4/ip_output.c
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
16 * A.N.Kuznetsov : arithmetic in fragmentation.
17 * extension headers are implemented.
18 * route changes now work.
19 * ip6_forward does not confuse sniffers.
22 * H. von Brand : Added missing #include <linux/string.h>
23 * Imran Patel : frag id should be in NBO
24 * Kazunori MIYAZAWA @USAGI
25 * : add ip6_append_data and related functions
29 #include <linux/errno.h>
30 #include <linux/kernel.h>
31 #include <linux/string.h>
32 #include <linux/socket.h>
33 #include <linux/net.h>
34 #include <linux/netdevice.h>
35 #include <linux/if_arp.h>
36 #include <linux/in6.h>
37 #include <linux/tcp.h>
38 #include <linux/route.h>
39 #include <linux/module.h>
40 #include <linux/slab.h>
42 #include <linux/netfilter.h>
43 #include <linux/netfilter_ipv6.h>
49 #include <net/ndisc.h>
50 #include <net/protocol.h>
51 #include <net/ip6_route.h>
52 #include <net/addrconf.h>
53 #include <net/rawv6.h>
56 #include <net/checksum.h>
57 #include <linux/mroute6.h>
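/* Final step of the output path: handle multicast looping and scope
 * checks, then resolve the next hop through the neighbour cache and hand
 * the skb to the device layer via dst_neigh_output().
 */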
59 static int ip6_finish_output2(struct sk_buff *skb)
61 struct dst_entry *dst = skb_dst(skb);
62 struct net_device *dev = dst->dev;
63 struct neighbour *neigh;
64 struct in6_addr *nexthop;
67 skb->protocol = htons(ETH_P_IPV6);
70 if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
71 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
73 if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) &&
74 ((mroute6_socket(dev_net(dev), skb) &&
75 !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
76 ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
77 &ipv6_hdr(skb)->saddr))) {
78 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
80 /* Do not check for IFF_ALLMULTI; multicast routing
81 is not supported in any case.
84 NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
85 newskb, NULL, newskb->dev,
88 if (ipv6_hdr(skb)->hop_limit == 0) {
89 IP6_INC_STATS(dev_net(dev), idev,
90 IPSTATS_MIB_OUTDISCARDS);
96 IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST,
99 if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
100 IPV6_ADDR_SCOPE_NODELOCAL &&
101 !(dev->flags & IFF_LOOPBACK)) {
108 nexthop = rt6_nexthop((struct rt6_info *)dst);
109 neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
110 if (unlikely(!neigh))
111 neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
112 if (!IS_ERR(neigh)) {
113 ret = dst_neigh_output(dst, neigh, skb);
114 rcu_read_unlock_bh();
117 rcu_read_unlock_bh();
119 IP6_INC_STATS(dev_net(dst->dev),
120 ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
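/* Fragment when the packet exceeds the path MTU (and is not GSO), when
 * the route demands fragmentation of every packet (dst_allfrag), or when
 * netfilter defrag recorded a smaller original fragment size.
 */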
125 static int ip6_finish_output(struct sk_buff *skb)
127 if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
128 dst_allfrag(skb_dst(skb)) ||
129 (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
130 return ip6_fragment(skb, ip6_finish_output2);
132 return ip6_finish_output2(skb);
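/* dst->output() entry point: drop early if IPv6 is administratively
 * disabled on the egress device, otherwise pass the skb through the
 * NF_INET_POST_ROUTING netfilter hook on its way to ip6_finish_output().
 */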
135 int ip6_output(struct sock *sk, struct sk_buff *skb)
137 struct net_device *dev = skb_dst(skb)->dev;
138 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
139 if (unlikely(idev->cnf.disable_ipv6)) {
140 IP6_INC_STATS(dev_net(dev), idev,
141 IPSTATS_MIB_OUTDISCARDS);
146 return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, dev,
148 !(IP6CB(skb)->flags & IP6SKB_REROUTED));
152 * xmit an sk_buff (used by TCP, SCTP and DCCP)
155 int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
156 struct ipv6_txoptions *opt, int tclass)
158 struct net *net = sock_net(sk);
159 struct ipv6_pinfo *np = inet6_sk(sk);
160 struct in6_addr *first_hop = &fl6->daddr;
161 struct dst_entry *dst = skb_dst(skb);
163 u8 proto = fl6->flowi6_proto;
164 int seg_len = skb->len;
169 unsigned int head_room;
171 /* First: exthdrs may take lots of space (~8K for now);
172 MAX_HEADER is not enough.
174 head_room = opt->opt_nflen + opt->opt_flen;
175 seg_len += head_room;
176 head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
178 if (skb_headroom(skb) < head_room) {
179 struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
181 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
182 IPSTATS_MIB_OUTDISCARDS);
188 skb_set_owner_w(skb, sk);
191 ipv6_push_frag_opts(skb, opt, &proto);
193 ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
196 skb_push(skb, sizeof(struct ipv6hdr));
197 skb_reset_network_header(skb);
201 * Fill in the IPv6 header
204 hlimit = np->hop_limit;
206 hlimit = ip6_dst_hoplimit(dst);
208 ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
211 hdr->payload_len = htons(seg_len);
212 hdr->nexthdr = proto;
213 hdr->hop_limit = hlimit;
215 hdr->saddr = fl6->saddr;
216 hdr->daddr = *first_hop;
218 skb->protocol = htons(ETH_P_IPV6);
219 skb->priority = sk->sk_priority;
220 skb->mark = sk->sk_mark;
223 if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
224 IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
225 IPSTATS_MIB_OUT, skb->len);
226 return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
227 dst->dev, dst_output);
231 ipv6_local_error(sk, EMSGSIZE, fl6, mtu);
232 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
236 EXPORT_SYMBOL(ip6_xmit);
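/* Deliver a packet carrying the IPv6 Router Alert option to every raw
 * socket that registered for the matching alert value via
 * IPV6_ROUTER_ALERT.  Returns 1 if at least one listener consumed the
 * skb (the caller then stops forwarding it), 0 otherwise.
 */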
238 static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
240 struct ip6_ra_chain *ra;
241 struct sock *last = NULL;
243 read_lock(&ip6_ra_lock);
244 for (ra = ip6_ra_chain; ra; ra = ra->next) {
245 struct sock *sk = ra->sk;
246 if (sk && ra->sel == sel &&
247 (!sk->sk_bound_dev_if ||
248 sk->sk_bound_dev_if == skb->dev->ifindex)) {
250 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
252 rawv6_rcv(last, skb2);
259 rawv6_rcv(last, skb);
260 read_unlock(&ip6_ra_lock);
263 read_unlock(&ip6_ra_lock);
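/* For a destination we proxy NDP for, decide what to do with the packet:
 * 1  - hand it to local input (unicast neighbour discovery messages),
 * 0  - forward it as usual,
 * <0 - drop it (e.g. a link-local destination we cannot forward).
 */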
267 static int ip6_forward_proxy_check(struct sk_buff *skb)
269 struct ipv6hdr *hdr = ipv6_hdr(skb);
270 u8 nexthdr = hdr->nexthdr;
274 if (ipv6_ext_hdr(nexthdr)) {
275 offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
279 offset = sizeof(struct ipv6hdr);
281 if (nexthdr == IPPROTO_ICMPV6) {
282 struct icmp6hdr *icmp6;
284 if (!pskb_may_pull(skb, (skb_network_header(skb) +
285 offset + 1 - skb->data)))
288 icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);
290 switch (icmp6->icmp6_type) {
291 case NDISC_ROUTER_SOLICITATION:
292 case NDISC_ROUTER_ADVERTISEMENT:
293 case NDISC_NEIGHBOUR_SOLICITATION:
294 case NDISC_NEIGHBOUR_ADVERTISEMENT:
296 /* For reaction involving unicast neighbor discovery
297 * message destined to the proxied address, pass it to the input function.
307 * The proxying router can't forward traffic sent to a link-local
308 * address, so signal the sender and discard the packet. This
309 * behavior is clarified by the MIPv6 specification.
311 if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
312 dst_link_failure(skb);
319 static inline int ip6_forward_finish(struct sk_buff *skb)
321 skb_sender_cpu_clear(skb);
322 return dst_output(skb);
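/* MTU to enforce when forwarding: essentially, a locked RTAX_MTU route
 * metric wins, otherwise fall back to the egress device's IPv6 MTU
 * (idev->cnf.mtu6).
 */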
325 static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
328 struct inet6_dev *idev;
330 if (dst_metric_locked(dst, RTAX_MTU)) {
331 mtu = dst_metric_raw(dst, RTAX_MTU);
338 idev = __in6_dev_get(dst->dev);
340 mtu = idev->cnf.mtu6;
346 static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
351 /* ipv6 conntrack defrag sets max_frag_size + ignore_df */
352 if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
358 if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
364 int ip6_forward(struct sk_buff *skb)
366 struct dst_entry *dst = skb_dst(skb);
367 struct ipv6hdr *hdr = ipv6_hdr(skb);
368 struct inet6_skb_parm *opt = IP6CB(skb);
369 struct net *net = dev_net(dst->dev);
372 if (net->ipv6.devconf_all->forwarding == 0)
375 if (skb->pkt_type != PACKET_HOST)
378 if (skb_warn_if_lro(skb))
381 if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
382 IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
383 IPSTATS_MIB_INDISCARDS);
387 skb_forward_csum(skb);
390 * We do no processing on
391 * RA packets, pushing them to user level AS IS
392 * without any warranty that the application will be able
393 * to interpret them. The reason is that we
394 * cannot do anything clever here.
396 * We are not the end node, so if a packet contains
397 * AH/ESP we cannot do anything with it.
398 * Defragmentation would also be a mistake; RA packets
399 * cannot be fragmented, because there is no guarantee
400 * that different fragments will travel along the same path. --ANK
402 if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
403 if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
408 * check and decrement hop limit
410 if (hdr->hop_limit <= 1) {
411 /* Force OUTPUT device used as source address */
413 icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
414 IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
415 IPSTATS_MIB_INHDRERRORS);
421 /* XXX: idev->cnf.proxy_ndp? */
422 if (net->ipv6.devconf_all->proxy_ndp &&
423 pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
424 int proxied = ip6_forward_proxy_check(skb);
426 return ip6_input(skb);
427 else if (proxied < 0) {
428 IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
429 IPSTATS_MIB_INDISCARDS);
434 if (!xfrm6_route_forward(skb)) {
435 IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
436 IPSTATS_MIB_INDISCARDS);
441 /* The IPv6 specs say nothing about it, but it is clear that we cannot
442 send redirects to source-routed frames.
443 We also don't send redirects to frames decapsulated from IPsec.
445 if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
446 struct in6_addr *target = NULL;
447 struct inet_peer *peer;
451 * incoming and outgoing devices are the same
455 rt = (struct rt6_info *) dst;
456 if (rt->rt6i_flags & RTF_GATEWAY)
457 target = &rt->rt6i_gateway;
459 target = &hdr->daddr;
461 peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
463 /* Limit redirects both by destination (here)
464 and by source (inside ndisc_send_redirect)
466 if (inet_peer_xrlim_allow(peer, 1*HZ))
467 ndisc_send_redirect(skb, target);
471 int addrtype = ipv6_addr_type(&hdr->saddr);
473 /* This check is security critical. */
474 if (addrtype == IPV6_ADDR_ANY ||
475 addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
477 if (addrtype & IPV6_ADDR_LINKLOCAL) {
478 icmpv6_send(skb, ICMPV6_DEST_UNREACH,
479 ICMPV6_NOT_NEIGHBOUR, 0);
484 mtu = ip6_dst_mtu_forward(dst);
485 if (mtu < IPV6_MIN_MTU)
488 if (ip6_pkt_too_big(skb, mtu)) {
489 /* Again, force OUTPUT device used as source address */
491 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
492 IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
493 IPSTATS_MIB_INTOOBIGERRORS);
494 IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
495 IPSTATS_MIB_FRAGFAILS);
500 if (skb_cow(skb, dst->dev->hard_header_len)) {
501 IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
502 IPSTATS_MIB_OUTDISCARDS);
508 /* Mangling the hop count is delayed until after the skb COW */
512 IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
513 IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
514 return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
518 IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
524 static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
526 to->pkt_type = from->pkt_type;
527 to->priority = from->priority;
528 to->protocol = from->protocol;
530 skb_dst_set(to, dst_clone(skb_dst(from)));
532 to->mark = from->mark;
534 #ifdef CONFIG_NET_SCHED
535 to->tc_index = from->tc_index;
538 skb_copy_secmark(to, from);
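/* Two fragmentation strategies below: the fast path reuses an existing
 * frag_list and only prepends a fragment header to each element; the
 * slow path allocates a new skb per fragment and copies the data into
 * it.  Either way every fragment carries the unfragmentable part of the
 * original headers.
 */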
541 int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
543 struct sk_buff *frag;
544 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
545 struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
546 struct ipv6hdr *tmp_hdr;
548 unsigned int mtu, hlen, left, len;
551 int ptr, offset = 0, err = 0;
552 u8 *prevhdr, nexthdr = 0;
553 struct net *net = dev_net(skb_dst(skb)->dev);
555 hlen = ip6_find_1stfragopt(skb, &prevhdr);
558 mtu = ip6_skb_dst_mtu(skb);
560 /* We must not fragment if the socket is set to force MTU discovery
561 * or if the skb was not generated by a local socket.
563 if (unlikely(!skb->ignore_df && skb->len > mtu) ||
564 (IP6CB(skb)->frag_max_size &&
565 IP6CB(skb)->frag_max_size > mtu)) {
566 if (skb->sk && dst_allfrag(skb_dst(skb)))
567 sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);
569 skb->dev = skb_dst(skb)->dev;
570 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
571 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
572 IPSTATS_MIB_FRAGFAILS);
577 if (np && np->frag_size < mtu) {
581 mtu -= hlen + sizeof(struct frag_hdr);
583 if (skb_has_frag_list(skb)) {
584 int first_len = skb_pagelen(skb);
585 struct sk_buff *frag2;
587 if (first_len - hlen > mtu ||
588 ((first_len - hlen) & 7) ||
592 skb_walk_frags(skb, frag) {
593 /* Correct geometry. */
594 if (frag->len > mtu ||
595 ((frag->len & 7) && frag->next) ||
596 skb_headroom(frag) < hlen)
597 goto slow_path_clean;
599 /* Partially cloned skb? */
600 if (skb_shared(frag))
601 goto slow_path_clean;
606 frag->destructor = sock_wfree;
608 skb->truesize -= frag->truesize;
613 frag = skb_shinfo(skb)->frag_list;
614 skb_frag_list_init(skb);
617 *prevhdr = NEXTHDR_FRAGMENT;
618 tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
620 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
621 IPSTATS_MIB_FRAGFAILS);
625 __skb_pull(skb, hlen);
626 fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
627 __skb_push(skb, hlen);
628 skb_reset_network_header(skb);
629 memcpy(skb_network_header(skb), tmp_hdr, hlen);
631 ipv6_select_ident(fh, rt);
632 fh->nexthdr = nexthdr;
634 fh->frag_off = htons(IP6_MF);
635 frag_id = fh->identification;
637 first_len = skb_pagelen(skb);
638 skb->data_len = first_len - skb_headlen(skb);
639 skb->len = first_len;
640 ipv6_hdr(skb)->payload_len = htons(first_len -
641 sizeof(struct ipv6hdr));
646 /* Prepare the header of the next frame
647 * before the previous one goes down. */
649 frag->ip_summed = CHECKSUM_NONE;
650 skb_reset_transport_header(frag);
651 fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
652 __skb_push(frag, hlen);
653 skb_reset_network_header(frag);
654 memcpy(skb_network_header(frag), tmp_hdr,
656 offset += skb->len - hlen - sizeof(struct frag_hdr);
657 fh->nexthdr = nexthdr;
659 fh->frag_off = htons(offset);
660 if (frag->next != NULL)
661 fh->frag_off |= htons(IP6_MF);
662 fh->identification = frag_id;
663 ipv6_hdr(frag)->payload_len =
665 sizeof(struct ipv6hdr));
666 ip6_copy_metadata(frag, skb);
671 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
672 IPSTATS_MIB_FRAGCREATES);
685 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
686 IPSTATS_MIB_FRAGOKS);
691 kfree_skb_list(frag);
693 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
694 IPSTATS_MIB_FRAGFAILS);
699 skb_walk_frags(skb, frag2) {
703 frag2->destructor = NULL;
704 skb->truesize += frag2->truesize;
709 if ((skb->ip_summed == CHECKSUM_PARTIAL) &&
710 skb_checksum_help(skb))
713 left = skb->len - hlen; /* Space per frame */
714 ptr = hlen; /* Where to start from */
717 * Fragment the datagram.
720 *prevhdr = NEXTHDR_FRAGMENT;
721 hroom = LL_RESERVED_SPACE(rt->dst.dev);
722 troom = rt->dst.dev->needed_tailroom;
725 * Keep copying data until we run out.
729 /* IF: it doesn't fit, use 'mtu' - the data space left */
732 /* IF: we are not sending up to and including the packet end,
733 then align the next start on an eight-byte boundary */
738 /* Allocate buffer */
739 frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
740 hroom + troom, GFP_ATOMIC);
742 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
743 IPSTATS_MIB_FRAGFAILS);
749 * Set up data on packet
752 ip6_copy_metadata(frag, skb);
753 skb_reserve(frag, hroom);
754 skb_put(frag, len + hlen + sizeof(struct frag_hdr));
755 skb_reset_network_header(frag);
756 fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
757 frag->transport_header = (frag->network_header + hlen +
758 sizeof(struct frag_hdr));
761 * Charge the memory for the fragment to any owner
765 skb_set_owner_w(frag, skb->sk);
768 * Copy the packet header into the new buffer.
770 skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);
773 * Build fragment header.
775 fh->nexthdr = nexthdr;
778 ipv6_select_ident(fh, rt);
779 frag_id = fh->identification;
781 fh->identification = frag_id;
784 * Copy a block of the IP datagram.
786 BUG_ON(skb_copy_bits(skb, ptr, skb_transport_header(frag),
790 fh->frag_off = htons(offset);
792 fh->frag_off |= htons(IP6_MF);
793 ipv6_hdr(frag)->payload_len = htons(frag->len -
794 sizeof(struct ipv6hdr));
800 * Put this fragment into the sending queue.
806 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
807 IPSTATS_MIB_FRAGCREATES);
809 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
810 IPSTATS_MIB_FRAGOKS);
815 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
816 IPSTATS_MIB_FRAGFAILS);
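/* Helper for ip6_sk_dst_check(): returns nonzero when a cached route
 * cannot be reused for @fl_addr, i.e. neither the /128 route key nor the
 * saved address snapshot matches the flow address.
 */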
821 static inline int ip6_rt_check(const struct rt6key *rt_key,
822 const struct in6_addr *fl_addr,
823 const struct in6_addr *addr_cache)
825 return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
826 (addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache));
829 static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
830 struct dst_entry *dst,
831 const struct flowi6 *fl6)
833 struct ipv6_pinfo *np = inet6_sk(sk);
839 if (dst->ops->family != AF_INET6) {
844 rt = (struct rt6_info *)dst;
845 /* Yes, checking route validity in the unconnected
846 * case is not simple. Take into account
847 * that we do not support routing by source, TOS,
848 * or MSG_DONTROUTE --ANK (980726)
850 * 1. ip6_rt_check(): If the route was a host route,
851 * check that the cached destination is current.
852 * If it is a network route, we still may
853 * check its validity using a saved pointer
854 * to the last used address: daddr_cache.
855 * We do not want to save the whole address now
856 * (because the main consumer of this service
857 * is TCP, which does not have this problem),
858 * so the last trick works only on connected sockets.
860 * 2. The oif also should be the same.
862 if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
863 #ifdef CONFIG_IPV6_SUBTREES
864 ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
866 (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
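/* Core of the dst lookup helpers below: run the routing lookup, pick a
 * source address if the flow has none, and (roughly) detour via the
 * default router while our chosen source address is still an optimistic
 * DAD address and the next hop is unresolved.
 */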
875 static int ip6_dst_lookup_tail(struct sock *sk,
876 struct dst_entry **dst, struct flowi6 *fl6)
878 struct net *net = sock_net(sk);
879 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
886 *dst = ip6_route_output(net, sk, fl6);
890 goto out_err_release;
892 if (ipv6_addr_any(&fl6->saddr)) {
893 struct rt6_info *rt = (struct rt6_info *) *dst;
894 err = ip6_route_get_saddr(net, rt, &fl6->daddr,
895 sk ? inet6_sk(sk)->srcprefs : 0,
898 goto out_err_release;
901 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
903 * Here if the dst entry we've looked up
904 * has a neighbour entry that is in the INCOMPLETE
905 * state and the src address from the flow is
906 * marked as OPTIMISTIC, we release the found
907 * dst entry and replace it with the
908 * dst entry of the nexthop router.
910 rt = (struct rt6_info *) *dst;
912 n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt));
913 err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
914 rcu_read_unlock_bh();
917 struct inet6_ifaddr *ifp;
918 struct flowi6 fl_gw6;
921 ifp = ipv6_get_ifaddr(net, &fl6->saddr,
924 redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
930 * We need to get the dst entry for the
931 * default router instead
934 memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
935 memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
936 *dst = ip6_route_output(net, sk, &fl_gw6);
939 goto out_err_release;
947 if (err == -ENETUNREACH)
948 IP6_INC_STATS(net, NULL, IPSTATS_MIB_OUTNOROUTES);
955 * ip6_dst_lookup - perform route lookup on flow
956 * @sk: socket which provides route info
957 * @dst: pointer to dst_entry * for result
958 * @fl6: flow to lookup
960 * This function performs a route lookup on the given flow.
962 * It returns zero on success, or a standard errno code on error.
964 int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6)
967 return ip6_dst_lookup_tail(sk, dst, fl6);
969 EXPORT_SYMBOL_GPL(ip6_dst_lookup);
972 * ip6_dst_lookup_flow - perform route lookup on flow with ipsec
973 * @sk: socket which provides route info
974 * @fl6: flow to lookup
975 * @final_dst: final destination address for ipsec lookup
977 * This function performs a route lookup on the given flow.
979 * It returns a valid dst pointer on success, or a pointer-encoded error value.
982 struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
983 const struct in6_addr *final_dst)
985 struct dst_entry *dst = NULL;
988 err = ip6_dst_lookup_tail(sk, &dst, fl6);
992 fl6->daddr = *final_dst;
994 return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
996 EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
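/* Typical use (a sketch, not taken from a specific caller): fill a
 * flowi6, do the lookup and check the result with IS_ERR(), e.g.
 *
 *	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
 *	if (IS_ERR(dst))
 *		return PTR_ERR(dst);
 */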
999 * ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
1000 * @sk: socket which provides the dst cache and route info
1001 * @fl6: flow to lookup
1002 * @final_dst: final destination address for ipsec lookup
1004 * This function performs a route lookup on the given flow with the
1005 * possibility of using the cached route in the socket if it is valid.
1006 * It will take the socket dst lock when operating on the dst cache.
1007 * As a result, this function can only be used in process context.
1009 * It returns a valid dst pointer on success, or a pointer-encoded error value.
1012 struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
1013 const struct in6_addr *final_dst)
1015 struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
1018 dst = ip6_sk_dst_check(sk, dst, fl6);
1020 err = ip6_dst_lookup_tail(sk, &dst, fl6);
1022 return ERR_PTR(err);
1024 fl6->daddr = *final_dst;
1026 return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
1028 EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
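/* UDP fragmentation offload path for the append-data machinery: build
 * (or extend) one oversized skb, mark it SKB_GSO_UDP with an
 * 8-byte-aligned gso_size, and let the GSO layer or the device split it
 * into fragments on transmit.
 */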
1030 static inline int ip6_ufo_append_data(struct sock *sk,
1031 struct sk_buff_head *queue,
1032 int getfrag(void *from, char *to, int offset, int len,
1033 int odd, struct sk_buff *skb),
1034 void *from, int length, int hh_len, int fragheaderlen,
1035 int transhdrlen, int mtu, unsigned int flags,
1036 struct rt6_info *rt)
1039 struct sk_buff *skb;
1040 struct frag_hdr fhdr;
1043 /* There is support for UDP large send offload by the network
1044 * device, so create one single skb containing the complete UDP datagram.
1047 skb = skb_peek_tail(queue);
1049 skb = sock_alloc_send_skb(sk,
1050 hh_len + fragheaderlen + transhdrlen + 20,
1051 (flags & MSG_DONTWAIT), &err);
1055 /* reserve space for Hardware header */
1056 skb_reserve(skb, hh_len);
1058 /* create space for UDP/IP header */
1059 skb_put(skb, fragheaderlen + transhdrlen);
1061 /* initialize network header pointer */
1062 skb_reset_network_header(skb);
1064 /* initialize protocol header pointer */
1065 skb->transport_header = skb->network_header + fragheaderlen;
1067 skb->protocol = htons(ETH_P_IPV6);
1070 __skb_queue_tail(queue, skb);
1071 } else if (skb_is_gso(skb)) {
1075 skb->ip_summed = CHECKSUM_PARTIAL;
1076 /* Specify the length of each IPv6 datagram fragment.
1077 * It has to be a multiple of 8.
1079 skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
1080 sizeof(struct frag_hdr)) & ~7;
1081 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1082 ipv6_select_ident(&fhdr, rt);
1083 skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
1086 return skb_append_datato_frags(sk, skb, getfrag, from,
1087 (length - transhdrlen));
1090 static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
1093 return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
1096 static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
1099 return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
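/* Recompute mtu/maxfraglen once the first fragment has been queued:
 * unless the route is an XFRM tunnel, the header_len reserved for the
 * first fragment becomes ordinary data space for the following ones.
 */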
1102 static void ip6_append_data_mtu(unsigned int *mtu,
1104 unsigned int fragheaderlen,
1105 struct sk_buff *skb,
1106 struct rt6_info *rt,
1107 unsigned int orig_mtu)
1109 if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
1111 /* first fragment, reserve header_len */
1112 *mtu = orig_mtu - rt->dst.header_len;
1116 * this fragment is not the first, so the header
1117 * space is regarded as data space.
1121 *maxfraglen = ((*mtu - fragheaderlen) & ~7)
1122 + fragheaderlen - sizeof(struct frag_hdr);
1126 static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
1127 struct inet6_cork *v6_cork,
1128 int hlimit, int tclass, struct ipv6_txoptions *opt,
1129 struct rt6_info *rt, struct flowi6 *fl6)
1131 struct ipv6_pinfo *np = inet6_sk(sk);
1138 if (WARN_ON(v6_cork->opt))
1141 v6_cork->opt = kzalloc(opt->tot_len, sk->sk_allocation);
1142 if (unlikely(v6_cork->opt == NULL))
1145 v6_cork->opt->tot_len = opt->tot_len;
1146 v6_cork->opt->opt_flen = opt->opt_flen;
1147 v6_cork->opt->opt_nflen = opt->opt_nflen;
1149 v6_cork->opt->dst0opt = ip6_opt_dup(opt->dst0opt,
1151 if (opt->dst0opt && !v6_cork->opt->dst0opt)
1154 v6_cork->opt->dst1opt = ip6_opt_dup(opt->dst1opt,
1156 if (opt->dst1opt && !v6_cork->opt->dst1opt)
1159 v6_cork->opt->hopopt = ip6_opt_dup(opt->hopopt,
1161 if (opt->hopopt && !v6_cork->opt->hopopt)
1164 v6_cork->opt->srcrt = ip6_rthdr_dup(opt->srcrt,
1166 if (opt->srcrt && !v6_cork->opt->srcrt)
1169 /* need source address above miyazawa*/
1172 cork->base.dst = &rt->dst;
1173 cork->fl.u.ip6 = *fl6;
1174 v6_cork->hop_limit = hlimit;
1175 v6_cork->tclass = tclass;
1176 if (rt->dst.flags & DST_XFRM_TUNNEL)
1177 mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
1178 rt->dst.dev->mtu : dst_mtu(&rt->dst);
1180 mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
1181 rt->dst.dev->mtu : dst_mtu(rt->dst.path);
1182 if (np->frag_size < mtu) {
1184 mtu = np->frag_size;
1186 cork->base.fragsize = mtu;
1187 if (dst_allfrag(rt->dst.path))
1188 cork->base.flags |= IPCORK_ALLFRAG;
1189 cork->base.length = 0;
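/* Workhorse behind ip6_append_data()/ip6_make_skb(): append user data to
 * the given queue, growing the tail skb or starting new ones sized so
 * that ip6_fragment() or the UFO path can later emit valid on-the-wire
 * fragments.
 */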
1194 static int __ip6_append_data(struct sock *sk,
1196 struct sk_buff_head *queue,
1197 struct inet_cork *cork,
1198 struct inet6_cork *v6_cork,
1199 struct page_frag *pfrag,
1200 int getfrag(void *from, char *to, int offset,
1201 int len, int odd, struct sk_buff *skb),
1202 void *from, int length, int transhdrlen,
1203 unsigned int flags, int dontfrag)
1205 struct sk_buff *skb, *skb_prev = NULL;
1206 unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
1208 int dst_exthdrlen = 0;
1215 struct rt6_info *rt = (struct rt6_info *)cork->dst;
1216 struct ipv6_txoptions *opt = v6_cork->opt;
1217 int csummode = CHECKSUM_NONE;
1219 skb = skb_peek_tail(queue);
1221 exthdrlen = opt ? opt->opt_flen : 0;
1222 dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
1225 mtu = cork->fragsize;
1228 hh_len = LL_RESERVED_SPACE(rt->dst.dev);
1230 fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
1231 (opt ? opt->opt_nflen : 0);
1232 maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
1233 sizeof(struct frag_hdr);
1235 if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
1236 unsigned int maxnonfragsize, headersize;
1238 headersize = sizeof(struct ipv6hdr) +
1239 (opt ? opt->opt_flen + opt->opt_nflen : 0) +
1240 (dst_allfrag(&rt->dst) ?
1241 sizeof(struct frag_hdr) : 0) +
1242 rt->rt6i_nfheader_len;
1244 if (ip6_sk_ignore_df(sk))
1245 maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
1247 maxnonfragsize = mtu;
1249 /* dontfrag active */
1250 if ((cork->length + length > mtu - headersize) && dontfrag &&
1251 (sk->sk_protocol == IPPROTO_UDP ||
1252 sk->sk_protocol == IPPROTO_RAW)) {
1253 ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
1254 sizeof(struct ipv6hdr));
1258 if (cork->length + length > maxnonfragsize - headersize) {
1260 ipv6_local_error(sk, EMSGSIZE, fl6,
1262 sizeof(struct ipv6hdr));
1267 if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_RAW) {
1268 sock_tx_timestamp(sk, &tx_flags);
1269 if (tx_flags & SKBTX_ANY_SW_TSTAMP &&
1270 sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
1271 tskey = sk->sk_tskey++;
1274 /* If this is the first and only packet and the device
1275 * supports checksum offloading, let's use it.
1277 if (!skb && sk->sk_protocol == IPPROTO_UDP &&
1278 length + fragheaderlen < mtu &&
1279 rt->dst.dev->features & NETIF_F_V6_CSUM &&
1281 csummode = CHECKSUM_PARTIAL;
1283 * Let's try using as much space as possible.
1284 * Use MTU if the total length of the message fits into the MTU.
1285 * Otherwise, we need to reserve a fragment header and
1286 * fragment alignment (= 8-15 octets, in total).
1288 * Note that we may need to "move" the data from the tail
1289 * of the buffer to the new fragment when we split the message.
1292 * FIXME: It may be fragmented into multiple chunks
1293 * at once if non-fragmentable extension headers are too large.
1298 cork->length += length;
1299 if (((length > mtu) ||
1300 (skb && skb_is_gso(skb))) &&
1301 (sk->sk_protocol == IPPROTO_UDP) &&
1302 (rt->dst.dev->features & NETIF_F_UFO) &&
1303 (sk->sk_type == SOCK_DGRAM)) {
1304 err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
1305 hh_len, fragheaderlen,
1306 transhdrlen, mtu, flags, rt);
1315 while (length > 0) {
1316 /* Check if the remaining data fits into current packet. */
1317 copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
1319 copy = maxfraglen - skb->len;
1323 unsigned int datalen;
1324 unsigned int fraglen;
1325 unsigned int fraggap;
1326 unsigned int alloclen;
1328 /* There's no room in the current skb */
1330 fraggap = skb->len - maxfraglen;
1333 /* update mtu and maxfraglen if necessary */
1334 if (skb == NULL || skb_prev == NULL)
1335 ip6_append_data_mtu(&mtu, &maxfraglen,
1336 fragheaderlen, skb, rt,
1342 * If remaining data exceeds the mtu,
1343 * we know we need more fragment(s).
1345 datalen = length + fraggap;
1347 if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
1348 datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
1349 if ((flags & MSG_MORE) &&
1350 !(rt->dst.dev->features&NETIF_F_SG))
1353 alloclen = datalen + fragheaderlen;
1355 alloclen += dst_exthdrlen;
1357 if (datalen != length + fraggap) {
1359 * this is not the last fragment, the trailer
1360 * space is regarded as data space.
1362 datalen += rt->dst.trailer_len;
1365 alloclen += rt->dst.trailer_len;
1366 fraglen = datalen + fragheaderlen;
1369 * We just reserve space for a fragment header.
1370 * Note: this may be an overallocation if the message
1371 * (without MSG_MORE) fits into the MTU.
1373 alloclen += sizeof(struct frag_hdr);
1376 skb = sock_alloc_send_skb(sk,
1378 (flags & MSG_DONTWAIT), &err);
1381 if (atomic_read(&sk->sk_wmem_alloc) <=
1383 skb = sock_wmalloc(sk,
1384 alloclen + hh_len, 1,
1386 if (unlikely(skb == NULL))
1392 * Fill in the control structures
1394 skb->protocol = htons(ETH_P_IPV6);
1395 skb->ip_summed = csummode;
1397 /* reserve for fragmentation and ipsec header */
1398 skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
1401 /* Only the initial fragment is time stamped */
1402 skb_shinfo(skb)->tx_flags = tx_flags;
1404 skb_shinfo(skb)->tskey = tskey;
1408 * Find where to start putting bytes
1410 data = skb_put(skb, fraglen);
1411 skb_set_network_header(skb, exthdrlen);
1412 data += fragheaderlen;
1413 skb->transport_header = (skb->network_header +
1416 skb->csum = skb_copy_and_csum_bits(
1417 skb_prev, maxfraglen,
1418 data + transhdrlen, fraggap, 0);
1419 skb_prev->csum = csum_sub(skb_prev->csum,
1422 pskb_trim_unique(skb_prev, maxfraglen);
1424 copy = datalen - transhdrlen - fraggap;
1430 } else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
1437 length -= datalen - fraggap;
1443 * Put the packet on the pending queue
1445 __skb_queue_tail(queue, skb);
1452 if (!(rt->dst.dev->features&NETIF_F_SG)) {
1456 if (getfrag(from, skb_put(skb, copy),
1457 offset, copy, off, skb) < 0) {
1458 __skb_trim(skb, off);
1463 int i = skb_shinfo(skb)->nr_frags;
1466 if (!sk_page_frag_refill(sk, pfrag))
1469 if (!skb_can_coalesce(skb, i, pfrag->page,
1472 if (i == MAX_SKB_FRAGS)
1475 __skb_fill_page_desc(skb, i, pfrag->page,
1477 skb_shinfo(skb)->nr_frags = ++i;
1478 get_page(pfrag->page);
1480 copy = min_t(int, copy, pfrag->size - pfrag->offset);
1482 page_address(pfrag->page) + pfrag->offset,
1483 offset, copy, skb->len, skb) < 0)
1486 pfrag->offset += copy;
1487 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1489 skb->data_len += copy;
1490 skb->truesize += copy;
1491 atomic_add(copy, &sk->sk_wmem_alloc);
1502 cork->length -= length;
1503 IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
1507 int ip6_append_data(struct sock *sk,
1508 int getfrag(void *from, char *to, int offset, int len,
1509 int odd, struct sk_buff *skb),
1510 void *from, int length, int transhdrlen, int hlimit,
1511 int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
1512 struct rt6_info *rt, unsigned int flags, int dontfrag)
1514 struct inet_sock *inet = inet_sk(sk);
1515 struct ipv6_pinfo *np = inet6_sk(sk);
1519 if (flags&MSG_PROBE)
1521 if (skb_queue_empty(&sk->sk_write_queue)) {
1525 err = ip6_setup_cork(sk, &inet->cork, &np->cork, hlimit,
1526 tclass, opt, rt, fl6);
1530 exthdrlen = (opt ? opt->opt_flen : 0);
1531 length += exthdrlen;
1532 transhdrlen += exthdrlen;
1534 fl6 = &inet->cork.fl.u.ip6;
1538 return __ip6_append_data(sk, fl6, &sk->sk_write_queue, &inet->cork.base,
1539 &np->cork, sk_page_frag(sk), getfrag,
1540 from, length, transhdrlen, flags, dontfrag);
1542 EXPORT_SYMBOL_GPL(ip6_append_data);
1544 static void ip6_cork_release(struct inet_cork_full *cork,
1545 struct inet6_cork *v6_cork)
1548 kfree(v6_cork->opt->dst0opt);
1549 kfree(v6_cork->opt->dst1opt);
1550 kfree(v6_cork->opt->hopopt);
1551 kfree(v6_cork->opt->srcrt);
1552 kfree(v6_cork->opt);
1553 v6_cork->opt = NULL;
1556 if (cork->base.dst) {
1557 dst_release(cork->base.dst);
1558 cork->base.dst = NULL;
1559 cork->base.flags &= ~IPCORK_ALLFRAG;
1561 memset(&cork->fl, 0, sizeof(cork->fl));
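/* Collapse the queued skbs into one packet with a frag_list, push the
 * extension headers and the IPv6 header, then release the cork.
 */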
1564 struct sk_buff *__ip6_make_skb(struct sock *sk,
1565 struct sk_buff_head *queue,
1566 struct inet_cork_full *cork,
1567 struct inet6_cork *v6_cork)
1569 struct sk_buff *skb, *tmp_skb;
1570 struct sk_buff **tail_skb;
1571 struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
1572 struct ipv6_pinfo *np = inet6_sk(sk);
1573 struct net *net = sock_net(sk);
1574 struct ipv6hdr *hdr;
1575 struct ipv6_txoptions *opt = v6_cork->opt;
1576 struct rt6_info *rt = (struct rt6_info *)cork->base.dst;
1577 struct flowi6 *fl6 = &cork->fl.u.ip6;
1578 unsigned char proto = fl6->flowi6_proto;
1580 skb = __skb_dequeue(queue);
1583 tail_skb = &(skb_shinfo(skb)->frag_list);
1585 /* move skb->data to ip header from ext header */
1586 if (skb->data < skb_network_header(skb))
1587 __skb_pull(skb, skb_network_offset(skb));
1588 while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
1589 __skb_pull(tmp_skb, skb_network_header_len(skb));
1590 *tail_skb = tmp_skb;
1591 tail_skb = &(tmp_skb->next);
1592 skb->len += tmp_skb->len;
1593 skb->data_len += tmp_skb->len;
1594 skb->truesize += tmp_skb->truesize;
1595 tmp_skb->destructor = NULL;
1599 /* Allow local fragmentation. */
1600 skb->ignore_df = ip6_sk_ignore_df(sk);
1602 *final_dst = fl6->daddr;
1603 __skb_pull(skb, skb_network_header_len(skb));
1604 if (opt && opt->opt_flen)
1605 ipv6_push_frag_opts(skb, opt, &proto);
1606 if (opt && opt->opt_nflen)
1607 ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);
1609 skb_push(skb, sizeof(struct ipv6hdr));
1610 skb_reset_network_header(skb);
1611 hdr = ipv6_hdr(skb);
1613 ip6_flow_hdr(hdr, v6_cork->tclass,
1614 ip6_make_flowlabel(net, skb, fl6->flowlabel,
1615 np->autoflowlabel));
1616 hdr->hop_limit = v6_cork->hop_limit;
1617 hdr->nexthdr = proto;
1618 hdr->saddr = fl6->saddr;
1619 hdr->daddr = *final_dst;
1621 skb->priority = sk->sk_priority;
1622 skb->mark = sk->sk_mark;
1624 skb_dst_set(skb, dst_clone(&rt->dst));
1625 IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
1626 if (proto == IPPROTO_ICMPV6) {
1627 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
1629 ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
1630 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1633 ip6_cork_release(cork, v6_cork);
1638 int ip6_send_skb(struct sk_buff *skb)
1640 struct net *net = sock_net(skb->sk);
1641 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
1644 err = ip6_local_out(skb);
1647 err = net_xmit_errno(err);
1649 IP6_INC_STATS(net, rt->rt6i_idev,
1650 IPSTATS_MIB_OUTDISCARDS);
1656 int ip6_push_pending_frames(struct sock *sk)
1658 struct sk_buff *skb;
1660 skb = ip6_finish_skb(sk);
1664 return ip6_send_skb(skb);
1666 EXPORT_SYMBOL_GPL(ip6_push_pending_frames);
1668 static void __ip6_flush_pending_frames(struct sock *sk,
1669 struct sk_buff_head *queue,
1670 struct inet_cork_full *cork,
1671 struct inet6_cork *v6_cork)
1673 struct sk_buff *skb;
1675 while ((skb = __skb_dequeue_tail(queue)) != NULL) {
1677 IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
1678 IPSTATS_MIB_OUTDISCARDS);
1682 ip6_cork_release(cork, v6_cork);
1685 void ip6_flush_pending_frames(struct sock *sk)
1687 __ip6_flush_pending_frames(sk, &sk->sk_write_queue,
1688 &inet_sk(sk)->cork, &inet6_sk(sk)->cork);
1690 EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);
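/* Non-corked variant: build the whole datagram on a private queue and
 * return the finished skb (or an ERR_PTR) without touching the socket
 * write queue.
 */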
1692 struct sk_buff *ip6_make_skb(struct sock *sk,
1693 int getfrag(void *from, char *to, int offset,
1694 int len, int odd, struct sk_buff *skb),
1695 void *from, int length, int transhdrlen,
1696 int hlimit, int tclass,
1697 struct ipv6_txoptions *opt, struct flowi6 *fl6,
1698 struct rt6_info *rt, unsigned int flags,
1701 struct inet_cork_full cork;
1702 struct inet6_cork v6_cork;
1703 struct sk_buff_head queue;
1704 int exthdrlen = (opt ? opt->opt_flen : 0);
1707 if (flags & MSG_PROBE)
1710 __skb_queue_head_init(&queue);
1712 cork.base.flags = 0;
1714 cork.base.opt = NULL;
1716 err = ip6_setup_cork(sk, &cork, &v6_cork, hlimit, tclass, opt, rt, fl6);
1718 return ERR_PTR(err);
1721 dontfrag = inet6_sk(sk)->dontfrag;
1723 err = __ip6_append_data(sk, fl6, &queue, &cork.base, &v6_cork,
1724 ¤t->task_frag, getfrag, from,
1725 length + exthdrlen, transhdrlen + exthdrlen,
1728 __ip6_flush_pending_frames(sk, &queue, &cork, &v6_cork);
1729 return ERR_PTR(err);
1732 return __ip6_make_skb(sk, &queue, &cork, &v6_cork);