/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetic in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *	H. von Brand	:	Added missing #include <linux/string.h>
 *	Imran Patel	:	frag id should be in NBO
 *	Kazunori MIYAZAWA @USAGI
 *			:	add ip6_append_data and related functions
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>
#include <linux/mroute6.h>
static int ip6_finish_output2(struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct net *net = dev_net(dev);
	struct neighbour *neigh;
	struct in6_addr *nexthop;
	int ret;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(sk) &&
		    ((mroute6_socket(net, skb) &&
		     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
		     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
					 &ipv6_hdr(skb)->saddr))) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			 * is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
					net, sk, newskb, NULL, newskb->dev,
					dev_loopback_xmit);

			if (ipv6_hdr(skb)->hop_limit == 0) {
				IP6_INC_STATS(net, idev,
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, skb->len);

		if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
		    IPV6_ADDR_SCOPE_NODELOCAL &&
		    !(dev->flags & IFF_LOOPBACK)) {
			kfree_skb(skb);
			return 0;
		}
	}

	rcu_read_lock_bh();
	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		ret = dst_neigh_output(dst, neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}
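/* Fragment when the packet is larger than the path MTU and not GSO, when the
 * route requires fragmentation of every packet (dst_allfrag), or when
 * conntrack defrag recorded a smaller incoming fragment size (frag_max_size).
 */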
static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
	    dst_allfrag(skb_dst(skb)) ||
	    (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
		return ip6_fragment(sk, skb, ip6_finish_output2);
	else
		return ip6_finish_output2(sk, skb);
}
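/* dst->output entry point for IPv6: drop if IPv6 is administratively disabled
 * on the device, otherwise run the NF_INET_POST_ROUTING hook (skipped for
 * packets already rerouted by netfilter) before ip6_finish_output() transmits.
 */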
int ip6_output(struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;
	struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
	struct net *net = dev_net(dev);

	if (unlikely(idev->cnf.disable_ipv6)) {
		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
		return 0;
	}

	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, dev,
			    ip6_finish_output,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
/*
 * xmit an sk_buff (used by TCP, SCTP and DCCP)
 * Note: the socket lock is not held for SYNACK packets, but the socket might
 * still be modified by calls to skb_set_owner_w() and ipv6_local_error(),
 * which use proper atomic operations or spinlocks.
 */
158 int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
159 struct ipv6_txoptions *opt, int tclass)
161 struct net *net = sock_net(sk);
162 const struct ipv6_pinfo *np = inet6_sk(sk);
163 struct in6_addr *first_hop = &fl6->daddr;
164 struct dst_entry *dst = skb_dst(skb);
166 u8 proto = fl6->flowi6_proto;
167 int seg_len = skb->len;
172 unsigned int head_room;
		/* First: exthdrs may take lots of space (~8K for now)
		 * MAX_HEADER is not enough.
		 */
177 head_room = opt->opt_nflen + opt->opt_flen;
178 seg_len += head_room;
179 head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
181 if (skb_headroom(skb) < head_room) {
182 struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
184 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
185 IPSTATS_MIB_OUTDISCARDS);
		/* skb_set_owner_w() changes sk->sk_wmem_alloc atomically,
		 * it is safe to call in our context (socket lock not held)
		 */
194 skb_set_owner_w(skb, (struct sock *)sk);
197 ipv6_push_frag_opts(skb, opt, &proto);
199 ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
202 skb_push(skb, sizeof(struct ipv6hdr));
203 skb_reset_network_header(skb);
	/*
	 *	Fill in the IPv6 header
	 */
210 hlimit = np->hop_limit;
212 hlimit = ip6_dst_hoplimit(dst);
214 ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
215 np->autoflowlabel, fl6));
217 hdr->payload_len = htons(seg_len);
218 hdr->nexthdr = proto;
219 hdr->hop_limit = hlimit;
221 hdr->saddr = fl6->saddr;
222 hdr->daddr = *first_hop;
224 skb->protocol = htons(ETH_P_IPV6);
225 skb->priority = sk->sk_priority;
226 skb->mark = sk->sk_mark;
229 if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
230 IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
231 IPSTATS_MIB_OUT, skb->len);
		/* hooks should never assume socket lock is held.
		 * we promote our socket to non const
		 */
235 return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
236 net, (struct sock *)sk, skb, NULL, dst->dev,
	/* ipv6_local_error() does not require socket lock,
	 * we promote our socket to non const
	 */
244 ipv6_local_error((struct sock *)sk, EMSGSIZE, fl6, mtu);
246 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
250 EXPORT_SYMBOL(ip6_xmit);
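/* Deliver the packet to every raw socket that registered for this Router
 * Alert option value via IPV6_ROUTER_ALERT; each listener but the last gets
 * its own clone of the packet.  Returns 1 if at least one socket took it.
 */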
252 static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
254 struct ip6_ra_chain *ra;
255 struct sock *last = NULL;
257 read_lock(&ip6_ra_lock);
258 for (ra = ip6_ra_chain; ra; ra = ra->next) {
259 struct sock *sk = ra->sk;
260 if (sk && ra->sel == sel &&
261 (!sk->sk_bound_dev_if ||
262 sk->sk_bound_dev_if == skb->dev->ifindex)) {
264 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
266 rawv6_rcv(last, skb2);
273 rawv6_rcv(last, skb);
274 read_unlock(&ip6_ra_lock);
277 read_unlock(&ip6_ra_lock);
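/* Classify a packet whose destination is subject to proxy NDP: return 1 to
 * deliver it to the local input path (neighbour discovery aimed at the
 * proxied address), 0 to forward it, or a negative value to drop it.
 */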
281 static int ip6_forward_proxy_check(struct sk_buff *skb)
283 struct ipv6hdr *hdr = ipv6_hdr(skb);
284 u8 nexthdr = hdr->nexthdr;
288 if (ipv6_ext_hdr(nexthdr)) {
289 offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
293 offset = sizeof(struct ipv6hdr);
295 if (nexthdr == IPPROTO_ICMPV6) {
296 struct icmp6hdr *icmp6;
298 if (!pskb_may_pull(skb, (skb_network_header(skb) +
299 offset + 1 - skb->data)))
302 icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);
304 switch (icmp6->icmp6_type) {
305 case NDISC_ROUTER_SOLICITATION:
306 case NDISC_ROUTER_ADVERTISEMENT:
307 case NDISC_NEIGHBOUR_SOLICITATION:
308 case NDISC_NEIGHBOUR_ADVERTISEMENT:
			/* For reaction involving unicast neighbor discovery
			 * message destined to the proxied address, pass it to
			 * input function.
			 */
	/*
	 * The proxying router can't forward traffic sent to a link-local
	 * address, so signal the sender and discard the packet. This
	 * behavior is clarified by the MIPv6 specification.
	 */
325 if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
326 dst_link_failure(skb);
static inline int ip6_forward_finish(struct net *net, struct sock *sk,
				     struct sk_buff *skb)
{
	skb_sender_cpu_clear(skb);
	return dst_output(sk, skb);
}
340 static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
343 struct inet6_dev *idev;
345 if (dst_metric_locked(dst, RTAX_MTU)) {
346 mtu = dst_metric_raw(dst, RTAX_MTU);
353 idev = __in6_dev_get(dst->dev);
355 mtu = idev->cnf.mtu6;
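/* True when the packet must trigger ICMPV6_PKT_TOOBIG rather than be
 * forwarded: it exceeds the MTU and neither ignore_df nor GSO segmentation
 * can save it (a frag_max_size recorded by conntrack defrag above the MTU
 * forces the error as well).
 */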
361 static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
366 /* ipv6 conntrack defrag sets max_frag_size + ignore_df */
367 if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
373 if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
379 int ip6_forward(struct sk_buff *skb)
381 struct dst_entry *dst = skb_dst(skb);
382 struct ipv6hdr *hdr = ipv6_hdr(skb);
383 struct inet6_skb_parm *opt = IP6CB(skb);
384 struct net *net = dev_net(dst->dev);
387 if (net->ipv6.devconf_all->forwarding == 0)
390 if (skb->pkt_type != PACKET_HOST)
393 if (skb_warn_if_lro(skb))
396 if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
397 IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
398 IPSTATS_MIB_INDISCARDS);
402 skb_forward_csum(skb);
	/*
	 *	We do not do any processing on RA packets; they are pushed
	 *	to user level AS IS, without any warranty that the
	 *	application will be able to interpret them. The reason is
	 *	that we cannot do anything clever here.
	 *
	 *	We are not an end-node, so if the packet contains
	 *	AH/ESP we cannot do anything.
	 *	Defragmentation would also be a mistake: RA packets
	 *	cannot be fragmented, because there is no guarantee
	 *	that different fragments will go along one path. --ANK
	 */
417 if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
418 if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
	/*
	 *	check and decrement hop limit
	 */
425 if (hdr->hop_limit <= 1) {
426 /* Force OUTPUT device used as source address */
428 icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
429 IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
430 IPSTATS_MIB_INHDRERRORS);
436 /* XXX: idev->cnf.proxy_ndp? */
437 if (net->ipv6.devconf_all->proxy_ndp &&
438 pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
439 int proxied = ip6_forward_proxy_check(skb);
441 return ip6_input(skb);
442 else if (proxied < 0) {
443 IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
444 IPSTATS_MIB_INDISCARDS);
449 if (!xfrm6_route_forward(skb)) {
450 IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
451 IPSTATS_MIB_INDISCARDS);
	/* IPv6 specs say nothing about it, but it is clear that we cannot
	 * send redirects to source routed frames.
	 * We don't send redirects to frames decapsulated from IPsec.
	 */
460 if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
461 struct in6_addr *target = NULL;
462 struct inet_peer *peer;
		/*
		 *	incoming and outgoing devices are the same:
		 *	send a redirect.
		 */
470 rt = (struct rt6_info *) dst;
471 if (rt->rt6i_flags & RTF_GATEWAY)
472 target = &rt->rt6i_gateway;
474 target = &hdr->daddr;
476 peer = inet_getpeer_v6(net->ipv6.peers, &hdr->daddr, 1);
		/* Limit redirects both by destination (here)
		 * and by source (inside ndisc_send_redirect)
		 */
481 if (inet_peer_xrlim_allow(peer, 1*HZ))
482 ndisc_send_redirect(skb, target);
486 int addrtype = ipv6_addr_type(&hdr->saddr);
488 /* This check is security critical. */
489 if (addrtype == IPV6_ADDR_ANY ||
490 addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
492 if (addrtype & IPV6_ADDR_LINKLOCAL) {
493 icmpv6_send(skb, ICMPV6_DEST_UNREACH,
494 ICMPV6_NOT_NEIGHBOUR, 0);
499 mtu = ip6_dst_mtu_forward(dst);
500 if (mtu < IPV6_MIN_MTU)
503 if (ip6_pkt_too_big(skb, mtu)) {
504 /* Again, force OUTPUT device used as source address */
506 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
507 IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
508 IPSTATS_MIB_INTOOBIGERRORS);
509 IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
510 IPSTATS_MIB_FRAGFAILS);
515 if (skb_cow(skb, dst->dev->hard_header_len)) {
516 IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
517 IPSTATS_MIB_OUTDISCARDS);
523 /* Mangling hops number delayed to point after skb COW */
527 IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
528 IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
529 return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
530 net, NULL, skb, skb->dev, dst->dev,
534 IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	skb_dst_drop(to);
	skb_dst_set(to, dst_clone(skb_dst(from)));
	to->dev = from->dev;
	to->mark = from->mark;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
	skb_copy_secmark(to, from);
}
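/* ip6_fragment() first tries the fast path: if the skb already carries a
 * suitable frag_list (each fragment within the MTU and 8-byte aligned), the
 * existing fragments are sent as-is with a fragment header pushed onto each.
 * Otherwise it falls back to the slow path, allocating new skbs and copying
 * the payload block by block.
 */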
557 int ip6_fragment(struct sock *sk, struct sk_buff *skb,
558 int (*output)(struct sock *, struct sk_buff *))
560 struct sk_buff *frag;
561 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
562 struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
563 inet6_sk(skb->sk) : NULL;
564 struct ipv6hdr *tmp_hdr;
566 unsigned int mtu, hlen, left, len;
569 int ptr, offset = 0, err = 0;
570 u8 *prevhdr, nexthdr = 0;
571 struct net *net = dev_net(skb_dst(skb)->dev);
573 hlen = ip6_find_1stfragopt(skb, &prevhdr);
576 mtu = ip6_skb_dst_mtu(skb);
	/* We must not fragment if the socket is set to force MTU discovery
	 * or if the skb is not generated by a local socket.
	 */
581 if (unlikely(!skb->ignore_df && skb->len > mtu))
584 if (IP6CB(skb)->frag_max_size) {
585 if (IP6CB(skb)->frag_max_size > mtu)
588 /* don't send fragments larger than what we received */
589 mtu = IP6CB(skb)->frag_max_size;
590 if (mtu < IPV6_MIN_MTU)
594 if (np && np->frag_size < mtu) {
598 mtu -= hlen + sizeof(struct frag_hdr);
600 frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
601 &ipv6_hdr(skb)->saddr);
603 hroom = LL_RESERVED_SPACE(rt->dst.dev);
604 if (skb_has_frag_list(skb)) {
605 int first_len = skb_pagelen(skb);
606 struct sk_buff *frag2;
608 if (first_len - hlen > mtu ||
609 ((first_len - hlen) & 7) ||
611 skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
614 skb_walk_frags(skb, frag) {
615 /* Correct geometry. */
616 if (frag->len > mtu ||
617 ((frag->len & 7) && frag->next) ||
618 skb_headroom(frag) < (hlen + hroom + sizeof(struct frag_hdr)))
619 goto slow_path_clean;
621 /* Partially cloned skb? */
622 if (skb_shared(frag))
623 goto slow_path_clean;
628 frag->destructor = sock_wfree;
630 skb->truesize -= frag->truesize;
637 *prevhdr = NEXTHDR_FRAGMENT;
638 tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
640 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
641 IPSTATS_MIB_FRAGFAILS);
645 frag = skb_shinfo(skb)->frag_list;
646 skb_frag_list_init(skb);
648 __skb_pull(skb, hlen);
649 fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
650 __skb_push(skb, hlen);
651 skb_reset_network_header(skb);
652 memcpy(skb_network_header(skb), tmp_hdr, hlen);
654 fh->nexthdr = nexthdr;
656 fh->frag_off = htons(IP6_MF);
657 fh->identification = frag_id;
659 first_len = skb_pagelen(skb);
660 skb->data_len = first_len - skb_headlen(skb);
661 skb->len = first_len;
662 ipv6_hdr(skb)->payload_len = htons(first_len -
663 sizeof(struct ipv6hdr));
			/* Prepare the header of the next frame,
			 * before the previous one goes down.
			 */
671 frag->ip_summed = CHECKSUM_NONE;
672 skb_reset_transport_header(frag);
673 fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
674 __skb_push(frag, hlen);
675 skb_reset_network_header(frag);
676 memcpy(skb_network_header(frag), tmp_hdr,
678 offset += skb->len - hlen - sizeof(struct frag_hdr);
679 fh->nexthdr = nexthdr;
681 fh->frag_off = htons(offset);
683 fh->frag_off |= htons(IP6_MF);
684 fh->identification = frag_id;
685 ipv6_hdr(frag)->payload_len =
687 sizeof(struct ipv6hdr));
688 ip6_copy_metadata(frag, skb);
691 err = output(sk, skb);
693 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
694 IPSTATS_MIB_FRAGCREATES);
707 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
708 IPSTATS_MIB_FRAGOKS);
713 kfree_skb_list(frag);
715 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
716 IPSTATS_MIB_FRAGFAILS);
721 skb_walk_frags(skb, frag2) {
725 frag2->destructor = NULL;
726 skb->truesize += frag2->truesize;
731 if ((skb->ip_summed == CHECKSUM_PARTIAL) &&
732 skb_checksum_help(skb))
735 left = skb->len - hlen; /* Space per frame */
736 ptr = hlen; /* Where to start from */
739 * Fragment the datagram.
742 *prevhdr = NEXTHDR_FRAGMENT;
743 troom = rt->dst.dev->needed_tailroom;
746 * Keep copying data until we run out.
750 /* IF: it doesn't fit, use 'mtu' - the data space left */
753 /* IF: we are not sending up to and including the packet end
754 then align the next start on an eight byte boundary */
759 /* Allocate buffer */
760 frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
761 hroom + troom, GFP_ATOMIC);
763 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
764 IPSTATS_MIB_FRAGFAILS);
770 * Set up data on packet
773 ip6_copy_metadata(frag, skb);
774 skb_reserve(frag, hroom);
775 skb_put(frag, len + hlen + sizeof(struct frag_hdr));
776 skb_reset_network_header(frag);
777 fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
778 frag->transport_header = (frag->network_header + hlen +
779 sizeof(struct frag_hdr));
782 * Charge the memory for the fragment to any owner
786 skb_set_owner_w(frag, skb->sk);
789 * Copy the packet header into the new buffer.
791 skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);
794 * Build fragment header.
796 fh->nexthdr = nexthdr;
798 fh->identification = frag_id;
801 * Copy a block of the IP datagram.
803 BUG_ON(skb_copy_bits(skb, ptr, skb_transport_header(frag),
807 fh->frag_off = htons(offset);
809 fh->frag_off |= htons(IP6_MF);
810 ipv6_hdr(frag)->payload_len = htons(frag->len -
811 sizeof(struct ipv6hdr));
817 * Put this fragment into the sending queue.
819 err = output(sk, frag);
823 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
824 IPSTATS_MIB_FRAGCREATES);
826 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
827 IPSTATS_MIB_FRAGOKS);
832 if (skb->sk && dst_allfrag(skb_dst(skb)))
833 sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);
835 skb->dev = skb_dst(skb)->dev;
836 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
840 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
841 IPSTATS_MIB_FRAGFAILS);
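/* Nonzero when the cached route key no longer matches the flow address:
 * neither an exact /128 match nor equal to the last validated address
 * (addr_cache).
 */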
static inline int ip6_rt_check(const struct rt6key *rt_key,
			       const struct in6_addr *fl_addr,
			       const struct in6_addr *addr_cache)
{
	return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
		(!addr_cache || !ipv6_addr_equal(fl_addr, addr_cache));
}
854 static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
855 struct dst_entry *dst,
856 const struct flowi6 *fl6)
858 struct ipv6_pinfo *np = inet6_sk(sk);
864 if (dst->ops->family != AF_INET6) {
869 rt = (struct rt6_info *)dst;
	/* Yes, checking route validity in the non-connected
	 * case is not very simple. Take into account
	 * that we do not support routing by source, TOS,
	 * and MSG_DONTROUTE		--ANK (980726)
	 *
	 * 1. ip6_rt_check(): If the route was a host route,
	 *    check that the cached destination is current.
	 *    If it is a network route, we still may
	 *    check its validity using a saved pointer
	 *    to the last used address: daddr_cache.
	 *    We do not want to save the whole address now
	 *    (because the main consumer of this service
	 *    is tcp, which does not have this problem),
	 *    so this last trick works only on connected
	 *    sockets.
	 * 2. oif also should be the same.
	 */
887 if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
888 #ifdef CONFIG_IPV6_SUBTREES
889 ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
891 (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
900 static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
901 struct dst_entry **dst, struct flowi6 *fl6)
903 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	/* The correct way to handle this would be to do
	 * ip6_route_get_saddr, and then ip6_route_output; however,
	 * the route-specific preferred source forces the
	 * ip6_route_output call _before_ ip6_route_get_saddr.
	 *
	 * In source specific routing (no src=any default route),
	 * ip6_route_output will fail given src=any saddr, though, so
	 * that's why we try it again later.
	 */
918 if (ipv6_addr_any(&fl6->saddr) && (!*dst || !(*dst)->error)) {
920 bool had_dst = *dst != NULL;
923 *dst = ip6_route_output(net, sk, fl6);
924 rt = (*dst)->error ? NULL : (struct rt6_info *)*dst;
925 err = ip6_route_get_saddr(net, rt, &fl6->daddr,
926 sk ? inet6_sk(sk)->srcprefs : 0,
929 goto out_err_release;
		/* If we had an erroneous initial result, pretend it
		 * never existed and let the SA-enabled version take
		 * over.
		 */
942 *dst = ip6_route_output(net, sk, fl6);
946 goto out_err_release;
948 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	/*
	 * Here if the dst entry we've looked up
	 * has a neighbour entry that is in the INCOMPLETE
	 * state and the src address from the flow is
	 * marked as OPTIMISTIC, we release the found
	 * dst entry and replace it instead with the
	 * dst entry of the nexthop router
	 */
957 rt = (struct rt6_info *) *dst;
959 n = __ipv6_neigh_lookup_noref(rt->dst.dev,
960 rt6_nexthop(rt, &fl6->daddr));
961 err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
962 rcu_read_unlock_bh();
965 struct inet6_ifaddr *ifp;
966 struct flowi6 fl_gw6;
969 ifp = ipv6_get_ifaddr(net, &fl6->saddr,
972 redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
			/*
			 * We need to get the dst entry for the
			 * default router instead
			 */
982 memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
983 memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
984 *dst = ip6_route_output(net, sk, &fl_gw6);
987 goto out_err_release;
995 if (err == -ENETUNREACH)
996 IP6_INC_STATS(net, NULL, IPSTATS_MIB_OUTNOROUTES);
/**
 *	ip6_dst_lookup - perform route lookup on flow
 *	@net: network namespace to perform the lookup in
 *	@sk: socket which provides route info
 *	@dst: pointer to dst_entry * for result
 *	@fl6: flow to lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
1012 int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
1016 return ip6_dst_lookup_tail(net, sk, dst, fl6);
1018 EXPORT_SYMBOL_GPL(ip6_dst_lookup);
/**
 *	ip6_dst_lookup_flow - perform route lookup on flow with ipsec
 *	@sk: socket which provides route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns a valid dst pointer on success, or a pointer encoded
 *	error code.
 */
1031 struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
1032 const struct in6_addr *final_dst)
1034 struct dst_entry *dst = NULL;
1037 err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
1039 return ERR_PTR(err);
1041 fl6->daddr = *final_dst;
1042 if (!fl6->flowi6_oif)
1043 fl6->flowi6_oif = dst->dev->ifindex;
1045 return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
1047 EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
/**
 *	ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
 *	@sk: socket which provides the dst cache and route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *
 *	This function performs a route lookup on the given flow with the
 *	possibility of using the cached route in the socket if it is valid.
 *	It will take the socket dst lock when operating on the dst cache.
 *	As a result, this function can only be used in process context.
 *
 *	It returns a valid dst pointer on success, or a pointer encoded
 *	error code.
 */
1063 struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
1064 const struct in6_addr *final_dst)
1066 struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
1069 dst = ip6_sk_dst_check(sk, dst, fl6);
1071 err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
1073 return ERR_PTR(err);
1075 fl6->daddr = *final_dst;
1077 return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
1079 EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
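/* Build (or extend) a single oversized skb for UDP fragmentation offload:
 * the device later splits it into IPv6 fragments, so gso_size is set to the
 * per-fragment payload, rounded down to a multiple of 8.
 */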
1081 static inline int ip6_ufo_append_data(struct sock *sk,
1082 struct sk_buff_head *queue,
1083 int getfrag(void *from, char *to, int offset, int len,
1084 int odd, struct sk_buff *skb),
1085 void *from, int length, int hh_len, int fragheaderlen,
1086 int transhdrlen, int mtu, unsigned int flags,
1087 const struct flowi6 *fl6)
1090 struct sk_buff *skb;
	/* There is support for UDP large send offload by network
	 * device, so create one single skb packet containing complete
	 * udp datagram
	 */
1097 skb = skb_peek_tail(queue);
1099 skb = sock_alloc_send_skb(sk,
1100 hh_len + fragheaderlen + transhdrlen + 20,
1101 (flags & MSG_DONTWAIT), &err);
1105 /* reserve space for Hardware header */
1106 skb_reserve(skb, hh_len);
1108 /* create space for UDP/IP header */
1109 skb_put(skb, fragheaderlen + transhdrlen);
1111 /* initialize network header pointer */
1112 skb_reset_network_header(skb);
1114 /* initialize protocol header pointer */
1115 skb->transport_header = skb->network_header + fragheaderlen;
1117 skb->protocol = htons(ETH_P_IPV6);
1120 __skb_queue_tail(queue, skb);
1121 } else if (skb_is_gso(skb)) {
1125 skb->ip_summed = CHECKSUM_PARTIAL;
1126 /* Specify the length of each IPv6 datagram fragment.
1127 * It has to be a multiple of 8.
1129 skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
1130 sizeof(struct frag_hdr)) & ~7;
1131 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1132 skb_shinfo(skb)->ip6_frag_id = ipv6_select_ident(sock_net(sk),
1137 return skb_append_datato_frags(sk, skb, getfrag, from,
1138 (length - transhdrlen));
1141 static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
1144 return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
1147 static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
1150 return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
1153 static void ip6_append_data_mtu(unsigned int *mtu,
1155 unsigned int fragheaderlen,
1156 struct sk_buff *skb,
1157 struct rt6_info *rt,
1158 unsigned int orig_mtu)
1160 if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
1162 /* first fragment, reserve header_len */
1163 *mtu = orig_mtu - rt->dst.header_len;
			/*
			 * this fragment is not the first; the header
			 * space is regarded as data space.
			 */
1172 *maxfraglen = ((*mtu - fragheaderlen) & ~7)
1173 + fragheaderlen - sizeof(struct frag_hdr);
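/* Stash everything needed for the corked transmission: duplicated extension
 * headers, the route, hop limit, traffic class and the fragment size that
 * subsequent append calls must honour.
 */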
1177 static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
1178 struct inet6_cork *v6_cork,
1179 int hlimit, int tclass, struct ipv6_txoptions *opt,
1180 struct rt6_info *rt, struct flowi6 *fl6)
1182 struct ipv6_pinfo *np = inet6_sk(sk);
1189 if (WARN_ON(v6_cork->opt))
1192 v6_cork->opt = kzalloc(opt->tot_len, sk->sk_allocation);
1193 if (unlikely(!v6_cork->opt))
1196 v6_cork->opt->tot_len = opt->tot_len;
1197 v6_cork->opt->opt_flen = opt->opt_flen;
1198 v6_cork->opt->opt_nflen = opt->opt_nflen;
1200 v6_cork->opt->dst0opt = ip6_opt_dup(opt->dst0opt,
1202 if (opt->dst0opt && !v6_cork->opt->dst0opt)
1205 v6_cork->opt->dst1opt = ip6_opt_dup(opt->dst1opt,
1207 if (opt->dst1opt && !v6_cork->opt->dst1opt)
1210 v6_cork->opt->hopopt = ip6_opt_dup(opt->hopopt,
1212 if (opt->hopopt && !v6_cork->opt->hopopt)
1215 v6_cork->opt->srcrt = ip6_rthdr_dup(opt->srcrt,
1217 if (opt->srcrt && !v6_cork->opt->srcrt)
	/* the source address needs to be set up before this point	--miyazawa */
1223 cork->base.dst = &rt->dst;
1224 cork->fl.u.ip6 = *fl6;
1225 v6_cork->hop_limit = hlimit;
1226 v6_cork->tclass = tclass;
1227 if (rt->dst.flags & DST_XFRM_TUNNEL)
1228 mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
1229 rt->dst.dev->mtu : dst_mtu(&rt->dst);
1231 mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
1232 rt->dst.dev->mtu : dst_mtu(rt->dst.path);
1233 if (np->frag_size < mtu) {
1235 mtu = np->frag_size;
1237 cork->base.fragsize = mtu;
1238 if (dst_allfrag(rt->dst.path))
1239 cork->base.flags |= IPCORK_ALLFRAG;
1240 cork->base.length = 0;
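/* Workhorse behind ip6_append_data() and ip6_make_skb(): copies user data
 * into a queue of skbs, each no larger than the corked fragment size, so the
 * final packet can be assembled (and fragmented if necessary) later.
 */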
1245 static int __ip6_append_data(struct sock *sk,
1247 struct sk_buff_head *queue,
1248 struct inet_cork *cork,
1249 struct inet6_cork *v6_cork,
1250 struct page_frag *pfrag,
1251 int getfrag(void *from, char *to, int offset,
1252 int len, int odd, struct sk_buff *skb),
1253 void *from, int length, int transhdrlen,
1254 unsigned int flags, int dontfrag)
1256 struct sk_buff *skb, *skb_prev = NULL;
1257 unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
1259 int dst_exthdrlen = 0;
1266 struct rt6_info *rt = (struct rt6_info *)cork->dst;
1267 struct ipv6_txoptions *opt = v6_cork->opt;
1268 int csummode = CHECKSUM_NONE;
1270 skb = skb_peek_tail(queue);
1272 exthdrlen = opt ? opt->opt_flen : 0;
1273 dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
1276 mtu = cork->fragsize;
1279 hh_len = LL_RESERVED_SPACE(rt->dst.dev);
1281 fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
1282 (opt ? opt->opt_nflen : 0);
1283 maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
1284 sizeof(struct frag_hdr);
1286 if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
1287 unsigned int maxnonfragsize, headersize;
1289 headersize = sizeof(struct ipv6hdr) +
1290 (opt ? opt->opt_flen + opt->opt_nflen : 0) +
1291 (dst_allfrag(&rt->dst) ?
1292 sizeof(struct frag_hdr) : 0) +
1293 rt->rt6i_nfheader_len;
1295 if (ip6_sk_ignore_df(sk))
1296 maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
1298 maxnonfragsize = mtu;
1300 /* dontfrag active */
1301 if ((cork->length + length > mtu - headersize) && dontfrag &&
1302 (sk->sk_protocol == IPPROTO_UDP ||
1303 sk->sk_protocol == IPPROTO_RAW)) {
1304 ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
1305 sizeof(struct ipv6hdr));
1309 if (cork->length + length > maxnonfragsize - headersize) {
1311 ipv6_local_error(sk, EMSGSIZE, fl6,
1313 sizeof(struct ipv6hdr));
1318 if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_RAW) {
1319 sock_tx_timestamp(sk, &tx_flags);
1320 if (tx_flags & SKBTX_ANY_SW_TSTAMP &&
1321 sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
1322 tskey = sk->sk_tskey++;
	/* If this is the first and only packet and device
	 * supports checksum offloading, let's use it.
	 * Use transhdrlen, same as IPv4, because partial
	 * sums only work when transhdrlen is set.
	 */
1330 if (transhdrlen && sk->sk_protocol == IPPROTO_UDP &&
1331 length + fragheaderlen < mtu &&
1332 rt->dst.dev->features & NETIF_F_V6_CSUM &&
1334 csummode = CHECKSUM_PARTIAL;
	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */
1351 cork->length += length;
1352 if (((length > mtu) ||
1353 (skb && skb_is_gso(skb))) &&
1354 (sk->sk_protocol == IPPROTO_UDP) &&
1355 (rt->dst.dev->features & NETIF_F_UFO) &&
1356 (sk->sk_type == SOCK_DGRAM)) {
1357 err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
1358 hh_len, fragheaderlen,
1359 transhdrlen, mtu, flags, fl6);
1368 while (length > 0) {
1369 /* Check if the remaining data fits into current packet. */
1370 copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
1372 copy = maxfraglen - skb->len;
1376 unsigned int datalen;
1377 unsigned int fraglen;
1378 unsigned int fraggap;
1379 unsigned int alloclen;
1381 /* There's no room in the current skb */
1383 fraggap = skb->len - maxfraglen;
1386 /* update mtu and maxfraglen if necessary */
1387 if (!skb || !skb_prev)
1388 ip6_append_data_mtu(&mtu, &maxfraglen,
1389 fragheaderlen, skb, rt,
1395 * If remaining data exceeds the mtu,
1396 * we know we need more fragment(s).
1398 datalen = length + fraggap;
1400 if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
1401 datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
1402 if ((flags & MSG_MORE) &&
1403 !(rt->dst.dev->features&NETIF_F_SG))
1406 alloclen = datalen + fragheaderlen;
1408 alloclen += dst_exthdrlen;
1410 if (datalen != length + fraggap) {
1412 * this is not the last fragment, the trailer
1413 * space is regarded as data space.
1415 datalen += rt->dst.trailer_len;
1418 alloclen += rt->dst.trailer_len;
1419 fraglen = datalen + fragheaderlen;
1422 * We just reserve space for fragment header.
1423 * Note: this may be overallocation if the message
1424 * (without MSG_MORE) fits into the MTU.
1426 alloclen += sizeof(struct frag_hdr);
1429 skb = sock_alloc_send_skb(sk,
1431 (flags & MSG_DONTWAIT), &err);
1434 if (atomic_read(&sk->sk_wmem_alloc) <=
1436 skb = sock_wmalloc(sk,
1437 alloclen + hh_len, 1,
1445 * Fill in the control structures
1447 skb->protocol = htons(ETH_P_IPV6);
1448 skb->ip_summed = csummode;
1450 /* reserve for fragmentation and ipsec header */
1451 skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
1454 /* Only the initial fragment is time stamped */
1455 skb_shinfo(skb)->tx_flags = tx_flags;
1457 skb_shinfo(skb)->tskey = tskey;
1461 * Find where to start putting bytes
1463 data = skb_put(skb, fraglen);
1464 skb_set_network_header(skb, exthdrlen);
1465 data += fragheaderlen;
1466 skb->transport_header = (skb->network_header +
1469 skb->csum = skb_copy_and_csum_bits(
1470 skb_prev, maxfraglen,
1471 data + transhdrlen, fraggap, 0);
1472 skb_prev->csum = csum_sub(skb_prev->csum,
1475 pskb_trim_unique(skb_prev, maxfraglen);
1477 copy = datalen - transhdrlen - fraggap;
1483 } else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
1490 length -= datalen - fraggap;
1496 * Put the packet on the pending queue
1498 __skb_queue_tail(queue, skb);
1505 if (!(rt->dst.dev->features&NETIF_F_SG)) {
1509 if (getfrag(from, skb_put(skb, copy),
1510 offset, copy, off, skb) < 0) {
1511 __skb_trim(skb, off);
1516 int i = skb_shinfo(skb)->nr_frags;
1519 if (!sk_page_frag_refill(sk, pfrag))
1522 if (!skb_can_coalesce(skb, i, pfrag->page,
1525 if (i == MAX_SKB_FRAGS)
1528 __skb_fill_page_desc(skb, i, pfrag->page,
1530 skb_shinfo(skb)->nr_frags = ++i;
1531 get_page(pfrag->page);
1533 copy = min_t(int, copy, pfrag->size - pfrag->offset);
1535 page_address(pfrag->page) + pfrag->offset,
1536 offset, copy, skb->len, skb) < 0)
1539 pfrag->offset += copy;
1540 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1542 skb->data_len += copy;
1543 skb->truesize += copy;
1544 atomic_add(copy, &sk->sk_wmem_alloc);
1555 cork->length -= length;
1556 IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
1560 int ip6_append_data(struct sock *sk,
1561 int getfrag(void *from, char *to, int offset, int len,
1562 int odd, struct sk_buff *skb),
1563 void *from, int length, int transhdrlen, int hlimit,
1564 int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
1565 struct rt6_info *rt, unsigned int flags, int dontfrag)
1567 struct inet_sock *inet = inet_sk(sk);
1568 struct ipv6_pinfo *np = inet6_sk(sk);
1572 if (flags&MSG_PROBE)
1574 if (skb_queue_empty(&sk->sk_write_queue)) {
1578 err = ip6_setup_cork(sk, &inet->cork, &np->cork, hlimit,
1579 tclass, opt, rt, fl6);
1583 exthdrlen = (opt ? opt->opt_flen : 0);
1584 length += exthdrlen;
1585 transhdrlen += exthdrlen;
1587 fl6 = &inet->cork.fl.u.ip6;
1591 return __ip6_append_data(sk, fl6, &sk->sk_write_queue, &inet->cork.base,
1592 &np->cork, sk_page_frag(sk), getfrag,
1593 from, length, transhdrlen, flags, dontfrag);
1595 EXPORT_SYMBOL_GPL(ip6_append_data);
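/* Release per-cork state: the extension headers duplicated by
 * ip6_setup_cork() and the reference held on the cached route.
 */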
1597 static void ip6_cork_release(struct inet_cork_full *cork,
1598 struct inet6_cork *v6_cork)
1601 kfree(v6_cork->opt->dst0opt);
1602 kfree(v6_cork->opt->dst1opt);
1603 kfree(v6_cork->opt->hopopt);
1604 kfree(v6_cork->opt->srcrt);
1605 kfree(v6_cork->opt);
1606 v6_cork->opt = NULL;
1609 if (cork->base.dst) {
1610 dst_release(cork->base.dst);
1611 cork->base.dst = NULL;
1612 cork->base.flags &= ~IPCORK_ALLFRAG;
1614 memset(&cork->fl, 0, sizeof(cork->fl));
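/* Merge the queued skbs into a single packet: the first skb becomes the head,
 * the rest are chained onto its frag_list, then the extension headers and the
 * IPv6 header are pushed on and the cork is released.
 */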
1617 struct sk_buff *__ip6_make_skb(struct sock *sk,
1618 struct sk_buff_head *queue,
1619 struct inet_cork_full *cork,
1620 struct inet6_cork *v6_cork)
1622 struct sk_buff *skb, *tmp_skb;
1623 struct sk_buff **tail_skb;
1624 struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
1625 struct ipv6_pinfo *np = inet6_sk(sk);
1626 struct net *net = sock_net(sk);
1627 struct ipv6hdr *hdr;
1628 struct ipv6_txoptions *opt = v6_cork->opt;
1629 struct rt6_info *rt = (struct rt6_info *)cork->base.dst;
1630 struct flowi6 *fl6 = &cork->fl.u.ip6;
1631 unsigned char proto = fl6->flowi6_proto;
1633 skb = __skb_dequeue(queue);
1636 tail_skb = &(skb_shinfo(skb)->frag_list);
1638 /* move skb->data to ip header from ext header */
1639 if (skb->data < skb_network_header(skb))
1640 __skb_pull(skb, skb_network_offset(skb));
1641 while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
1642 __skb_pull(tmp_skb, skb_network_header_len(skb));
1643 *tail_skb = tmp_skb;
1644 tail_skb = &(tmp_skb->next);
1645 skb->len += tmp_skb->len;
1646 skb->data_len += tmp_skb->len;
1647 skb->truesize += tmp_skb->truesize;
1648 tmp_skb->destructor = NULL;
1652 /* Allow local fragmentation. */
1653 skb->ignore_df = ip6_sk_ignore_df(sk);
1655 *final_dst = fl6->daddr;
1656 __skb_pull(skb, skb_network_header_len(skb));
1657 if (opt && opt->opt_flen)
1658 ipv6_push_frag_opts(skb, opt, &proto);
1659 if (opt && opt->opt_nflen)
1660 ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);
1662 skb_push(skb, sizeof(struct ipv6hdr));
1663 skb_reset_network_header(skb);
1664 hdr = ipv6_hdr(skb);
1666 ip6_flow_hdr(hdr, v6_cork->tclass,
1667 ip6_make_flowlabel(net, skb, fl6->flowlabel,
1668 np->autoflowlabel, fl6));
1669 hdr->hop_limit = v6_cork->hop_limit;
1670 hdr->nexthdr = proto;
1671 hdr->saddr = fl6->saddr;
1672 hdr->daddr = *final_dst;
1674 skb->priority = sk->sk_priority;
1675 skb->mark = sk->sk_mark;
1677 skb_dst_set(skb, dst_clone(&rt->dst));
1678 IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
1679 if (proto == IPPROTO_ICMPV6) {
1680 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
1682 ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
1683 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1686 ip6_cork_release(cork, v6_cork);
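/* Transmit a packet built by __ip6_make_skb(): pass it to ip6_local_out(),
 * translate qdisc return codes with net_xmit_errno() and count failures as
 * OUTDISCARDS.
 */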
1691 int ip6_send_skb(struct sk_buff *skb)
1693 struct net *net = sock_net(skb->sk);
1694 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
1697 err = ip6_local_out(skb);
1700 err = net_xmit_errno(err);
1702 IP6_INC_STATS(net, rt->rt6i_idev,
1703 IPSTATS_MIB_OUTDISCARDS);
int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;

	skb = ip6_finish_skb(sk);
	if (!skb)
		return 0;

	return ip6_send_skb(skb);
}
static void __ip6_flush_pending_frames(struct sock *sk,
				       struct sk_buff_head *queue,
				       struct inet_cork_full *cork,
				       struct inet6_cork *v6_cork)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(queue)) != NULL) {
		if (skb_dst(skb))
			IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
	}

	ip6_cork_release(cork, v6_cork);
}
void ip6_flush_pending_frames(struct sock *sk)
{
	__ip6_flush_pending_frames(sk, &sk->sk_write_queue,
				   &inet_sk(sk)->cork, &inet6_sk(sk)->cork);
}
1743 EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);
1745 struct sk_buff *ip6_make_skb(struct sock *sk,
1746 int getfrag(void *from, char *to, int offset,
1747 int len, int odd, struct sk_buff *skb),
1748 void *from, int length, int transhdrlen,
1749 int hlimit, int tclass,
1750 struct ipv6_txoptions *opt, struct flowi6 *fl6,
1751 struct rt6_info *rt, unsigned int flags,
1754 struct inet_cork_full cork;
1755 struct inet6_cork v6_cork;
1756 struct sk_buff_head queue;
1757 int exthdrlen = (opt ? opt->opt_flen : 0);
1760 if (flags & MSG_PROBE)
1763 __skb_queue_head_init(&queue);
1765 cork.base.flags = 0;
1767 cork.base.opt = NULL;
1769 err = ip6_setup_cork(sk, &cork, &v6_cork, hlimit, tclass, opt, rt, fl6);
1771 return ERR_PTR(err);
1774 dontfrag = inet6_sk(sk)->dontfrag;
1776 err = __ip6_append_data(sk, fl6, &queue, &cork.base, &v6_cork,
			       &current->task_frag, getfrag, from,
1778 length + exthdrlen, transhdrlen + exthdrlen,
1781 __ip6_flush_pending_frames(sk, &queue, &cork, &v6_cork);
1782 return ERR_PTR(err);
1785 return __ip6_make_skb(sk, &queue, &cork, &v6_cork);