2 * IPv6 output functions
3 * Linux INET6 implementation
6 * Pedro Roque <roque@di.fc.ul.pt>
8 * Based on linux/net/ipv4/ip_output.c
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
16 * A.N.Kuznetsov : arithmetic in fragmentation.
17 * extension headers are implemented.
18 * route changes now work.
19 * ip6_forward does not confuse sniffers.
22 * H. von Brand : Added missing #include <linux/string.h>
23 * Imran Patel : frag id should be in NBO
24 * Kazunori MIYAZAWA @USAGI
25 * : add ip6_append_data and related functions
29 #include <linux/errno.h>
30 #include <linux/kernel.h>
31 #include <linux/string.h>
32 #include <linux/socket.h>
33 #include <linux/net.h>
34 #include <linux/netdevice.h>
35 #include <linux/if_arp.h>
36 #include <linux/in6.h>
37 #include <linux/tcp.h>
38 #include <linux/route.h>
39 #include <linux/module.h>
40 #include <linux/slab.h>
42 #include <linux/netfilter.h>
43 #include <linux/netfilter_ipv6.h>
49 #include <net/ndisc.h>
50 #include <net/protocol.h>
51 #include <net/ip6_route.h>
52 #include <net/addrconf.h>
53 #include <net/rawv6.h>
56 #include <net/checksum.h>
57 #include <linux/mroute6.h>
58 #include <net/l3mdev.h>
60 static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
62 struct dst_entry *dst = skb_dst(skb);
63 struct net_device *dev = dst->dev;
64 struct neighbour *neigh;
65 struct in6_addr *nexthop;
68 skb->protocol = htons(ETH_P_IPV6);
71 if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
72 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
74 if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(sk) &&
75 ((mroute6_socket(net, skb) &&
76 !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
77 ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
78 &ipv6_hdr(skb)->saddr))) {
79 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
81 /* Do not check for IFF_ALLMULTI; multicast routing
82 is not supported in any case.
85 NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
86 net, sk, newskb, NULL, newskb->dev,
89 if (ipv6_hdr(skb)->hop_limit == 0) {
90 IP6_INC_STATS(net, idev,
91 IPSTATS_MIB_OUTDISCARDS);
97 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, skb->len);
99 if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
100 IPV6_ADDR_SCOPE_NODELOCAL &&
101 !(dev->flags & IFF_LOOPBACK)) {
108 nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
109 neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
110 if (unlikely(!neigh))
111 neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
112 if (!IS_ERR(neigh)) {
113 ret = dst_neigh_output(dst, neigh, skb);
114 rcu_read_unlock_bh();
117 rcu_read_unlock_bh();
119 IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
124 static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
126 if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
127 dst_allfrag(skb_dst(skb)) ||
128 (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
129 return ip6_fragment(net, sk, skb, ip6_finish_output2);
131 return ip6_finish_output2(net, sk, skb);
134 int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
136 struct net_device *dev = skb_dst(skb)->dev;
137 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
139 if (unlikely(idev->cnf.disable_ipv6)) {
140 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
145 return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
146 net, sk, skb, NULL, dev,
148 !(IP6CB(skb)->flags & IP6SKB_REROUTED));
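/*
 * The local output path runs ip6_output() -> NF_INET_POST_ROUTING ->
 * ip6_finish_output(), which fragments the packet when required and
 * finally hands it to ip6_finish_output2() for neighbour resolution
 * and transmission.  Packets already flagged IP6SKB_REROUTED bypass
 * the POST_ROUTING hook (see the NF_HOOK_COND condition above).
 */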
152 * xmit an sk_buff (used by TCP, SCTP and DCCP)
153 * Note: the socket lock is not held for SYNACK packets, but the skb may
154 * still be modified by calls to skb_set_owner_w() and ipv6_local_error(),
155 * which use proper atomic operations or spinlocks.
157 int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
158 struct ipv6_txoptions *opt, int tclass)
160 struct net *net = sock_net(sk);
161 const struct ipv6_pinfo *np = inet6_sk(sk);
162 struct in6_addr *first_hop = &fl6->daddr;
163 struct dst_entry *dst = skb_dst(skb);
165 u8 proto = fl6->flowi6_proto;
166 int seg_len = skb->len;
171 unsigned int head_room;
173 /* First: extension headers may take lots of space (~8K for now);
174 MAX_HEADER is not enough.
176 head_room = opt->opt_nflen + opt->opt_flen;
177 seg_len += head_room;
178 head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
180 if (skb_headroom(skb) < head_room) {
181 struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
183 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
184 IPSTATS_MIB_OUTDISCARDS);
190 /* skb_set_owner_w() changes sk->sk_wmem_alloc atomically,
191 * so it is safe to call in our context (socket lock not held)
193 skb_set_owner_w(skb, (struct sock *)sk);
196 ipv6_push_frag_opts(skb, opt, &proto);
198 ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
201 skb_push(skb, sizeof(struct ipv6hdr));
202 skb_reset_network_header(skb);
206 * Fill in the IPv6 header
209 hlimit = np->hop_limit;
211 hlimit = ip6_dst_hoplimit(dst);
213 ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
214 np->autoflowlabel, fl6));
216 hdr->payload_len = htons(seg_len);
217 hdr->nexthdr = proto;
218 hdr->hop_limit = hlimit;
220 hdr->saddr = fl6->saddr;
221 hdr->daddr = *first_hop;
223 skb->protocol = htons(ETH_P_IPV6);
224 skb->priority = sk->sk_priority;
225 skb->mark = sk->sk_mark;
228 if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
229 IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
230 IPSTATS_MIB_OUT, skb->len);
231 /* hooks should never assume socket lock is held.
232 * we promote our socket to non const
234 return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
235 net, (struct sock *)sk, skb, NULL, dst->dev,
240 /* ipv6_local_error() does not require socket lock,
241 * we promote our socket to non const
243 ipv6_local_error((struct sock *)sk, EMSGSIZE, fl6, mtu);
245 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
249 EXPORT_SYMBOL(ip6_xmit);
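/*
 * Illustrative sketch, not taken from a real caller: how a transport
 * protocol might hand a fully built segment to ip6_xmit().  The caller
 * must attach the route to the skb itself; fl6 only describes the flow
 * used for that route lookup.  The function and parameter names here
 * are hypothetical.
 */
static int __maybe_unused example_transport_xmit(struct sock *sk,
						 struct sk_buff *skb,
						 struct flowi6 *fl6,
						 struct dst_entry *dst)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	/* ip6_xmit() reads the route from skb_dst(), not from fl6 */
	skb_dst_set(skb, dst_clone(dst));

	/* push the IPv6 header (no extension headers here) and pass the
	 * packet to the NF_INET_LOCAL_OUT hook */
	return ip6_xmit(sk, skb, fl6, NULL, np->tclass);
}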
251 static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
253 struct ip6_ra_chain *ra;
254 struct sock *last = NULL;
256 read_lock(&ip6_ra_lock);
257 for (ra = ip6_ra_chain; ra; ra = ra->next) {
258 struct sock *sk = ra->sk;
259 if (sk && ra->sel == sel &&
260 (!sk->sk_bound_dev_if ||
261 sk->sk_bound_dev_if == skb->dev->ifindex)) {
263 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
265 rawv6_rcv(last, skb2);
272 rawv6_rcv(last, skb);
273 read_unlock(&ip6_ra_lock);
276 read_unlock(&ip6_ra_lock);
280 static int ip6_forward_proxy_check(struct sk_buff *skb)
282 struct ipv6hdr *hdr = ipv6_hdr(skb);
283 u8 nexthdr = hdr->nexthdr;
287 if (ipv6_ext_hdr(nexthdr)) {
288 offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
292 offset = sizeof(struct ipv6hdr);
294 if (nexthdr == IPPROTO_ICMPV6) {
295 struct icmp6hdr *icmp6;
297 if (!pskb_may_pull(skb, (skb_network_header(skb) +
298 offset + 1 - skb->data)))
301 icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);
303 switch (icmp6->icmp6_type) {
304 case NDISC_ROUTER_SOLICITATION:
305 case NDISC_ROUTER_ADVERTISEMENT:
306 case NDISC_NEIGHBOUR_SOLICITATION:
307 case NDISC_NEIGHBOUR_ADVERTISEMENT:
309 /* For reaction involving unicast neighbor discovery
310 * message destined to the proxied address, pass it to the input function.
320 * The proxying router can't forward traffic sent to a link-local
321 * address, so signal the sender and discard the packet. This
322 * behavior is clarified by the MIPv6 specification.
324 if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
325 dst_link_failure(skb);
332 static inline int ip6_forward_finish(struct net *net, struct sock *sk,
335 skb_sender_cpu_clear(skb);
336 return dst_output(net, sk, skb);
339 static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
342 struct inet6_dev *idev;
344 if (dst_metric_locked(dst, RTAX_MTU)) {
345 mtu = dst_metric_raw(dst, RTAX_MTU);
352 idev = __in6_dev_get(dst->dev);
354 mtu = idev->cnf.mtu6;
360 static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
365 /* ipv6 conntrack defrag sets max_frag_size + ignore_df */
366 if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
372 if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
378 int ip6_forward(struct sk_buff *skb)
380 struct dst_entry *dst = skb_dst(skb);
381 struct ipv6hdr *hdr = ipv6_hdr(skb);
382 struct inet6_skb_parm *opt = IP6CB(skb);
383 struct net *net = dev_net(dst->dev);
386 if (net->ipv6.devconf_all->forwarding == 0)
389 if (skb->pkt_type != PACKET_HOST)
392 if (skb_warn_if_lro(skb))
395 if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
396 IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
397 IPSTATS_MIB_INDISCARDS);
401 skb_forward_csum(skb);
404 * We do NOT do any processing on
405 * RA packets; they are pushed to user level AS IS,
406 * with no guarantee that the application will be able
407 * to interpret them. The reason is that we
408 * cannot do anything clever here.
410 * We are not the end node, so if the packet contains
411 * AH/ESP we cannot do anything with it.
412 * Defragmentation would also be a mistake: RA packets
413 * cannot be fragmented, because there is no guarantee
414 * that different fragments will go along one path. --ANK
416 if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
417 if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
422 * check and decrement ttl
424 if (hdr->hop_limit <= 1) {
425 /* Force OUTPUT device used as source address */
427 icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
428 IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
429 IPSTATS_MIB_INHDRERRORS);
435 /* XXX: idev->cnf.proxy_ndp? */
436 if (net->ipv6.devconf_all->proxy_ndp &&
437 pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
438 int proxied = ip6_forward_proxy_check(skb);
440 return ip6_input(skb);
441 else if (proxied < 0) {
442 IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
443 IPSTATS_MIB_INDISCARDS);
448 if (!xfrm6_route_forward(skb)) {
449 IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
450 IPSTATS_MIB_INDISCARDS);
455 /* IPv6 specs say nothing about it, but it is clear that we cannot
456 send redirects to source routed frames.
457 We don't send redirects to frames decapsulated from IPsec.
459 if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
460 struct in6_addr *target = NULL;
461 struct inet_peer *peer;
465 * incoming and outgoing devices are the same
469 rt = (struct rt6_info *) dst;
470 if (rt->rt6i_flags & RTF_GATEWAY)
471 target = &rt->rt6i_gateway;
473 target = &hdr->daddr;
475 peer = inet_getpeer_v6(net->ipv6.peers, &hdr->daddr, 1);
477 /* Limit redirects both by destination (here)
478 and by source (inside ndisc_send_redirect)
480 if (inet_peer_xrlim_allow(peer, 1*HZ))
481 ndisc_send_redirect(skb, target);
485 int addrtype = ipv6_addr_type(&hdr->saddr);
487 /* This check is security critical. */
488 if (addrtype == IPV6_ADDR_ANY ||
489 addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
491 if (addrtype & IPV6_ADDR_LINKLOCAL) {
492 icmpv6_send(skb, ICMPV6_DEST_UNREACH,
493 ICMPV6_NOT_NEIGHBOUR, 0);
498 mtu = ip6_dst_mtu_forward(dst);
499 if (mtu < IPV6_MIN_MTU)
502 if (ip6_pkt_too_big(skb, mtu)) {
503 /* Again, force OUTPUT device used as source address */
505 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
506 IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
507 IPSTATS_MIB_INTOOBIGERRORS);
508 IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
509 IPSTATS_MIB_FRAGFAILS);
514 if (skb_cow(skb, dst->dev->hard_header_len)) {
515 IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
516 IPSTATS_MIB_OUTDISCARDS);
522 /* Mangling the hop limit is delayed until after skb COW */
526 IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
527 IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
528 return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
529 net, NULL, skb, skb->dev, dst->dev,
533 IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
539 static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
541 to->pkt_type = from->pkt_type;
542 to->priority = from->priority;
543 to->protocol = from->protocol;
545 skb_dst_set(to, dst_clone(skb_dst(from)));
547 to->mark = from->mark;
549 #ifdef CONFIG_NET_SCHED
550 to->tc_index = from->tc_index;
553 skb_copy_secmark(to, from);
556 int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
557 int (*output)(struct net *, struct sock *, struct sk_buff *))
559 struct sk_buff *frag;
560 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
561 struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
562 inet6_sk(skb->sk) : NULL;
563 struct ipv6hdr *tmp_hdr;
565 unsigned int mtu, hlen, left, len;
568 int ptr, offset = 0, err = 0;
569 u8 *prevhdr, nexthdr = 0;
571 hlen = ip6_find_1stfragopt(skb, &prevhdr);
574 mtu = ip6_skb_dst_mtu(skb);
576 /* We must not fragment if the socket is set to force MTU discovery
577 * or if the skb was not generated by a local socket.
579 if (unlikely(!skb->ignore_df && skb->len > mtu))
582 if (IP6CB(skb)->frag_max_size) {
583 if (IP6CB(skb)->frag_max_size > mtu)
586 /* don't send fragments larger than what we received */
587 mtu = IP6CB(skb)->frag_max_size;
588 if (mtu < IPV6_MIN_MTU)
592 if (np && np->frag_size < mtu) {
596 mtu -= hlen + sizeof(struct frag_hdr);
598 frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
599 &ipv6_hdr(skb)->saddr);
601 hroom = LL_RESERVED_SPACE(rt->dst.dev);
602 if (skb_has_frag_list(skb)) {
603 int first_len = skb_pagelen(skb);
604 struct sk_buff *frag2;
606 if (first_len - hlen > mtu ||
607 ((first_len - hlen) & 7) ||
609 skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
612 skb_walk_frags(skb, frag) {
613 /* Correct geometry. */
614 if (frag->len > mtu ||
615 ((frag->len & 7) && frag->next) ||
616 skb_headroom(frag) < (hlen + hroom + sizeof(struct frag_hdr)))
617 goto slow_path_clean;
619 /* Partially cloned skb? */
620 if (skb_shared(frag))
621 goto slow_path_clean;
626 frag->destructor = sock_wfree;
628 skb->truesize -= frag->truesize;
635 *prevhdr = NEXTHDR_FRAGMENT;
636 tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
638 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
639 IPSTATS_MIB_FRAGFAILS);
643 frag = skb_shinfo(skb)->frag_list;
644 skb_frag_list_init(skb);
646 __skb_pull(skb, hlen);
647 fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
648 __skb_push(skb, hlen);
649 skb_reset_network_header(skb);
650 memcpy(skb_network_header(skb), tmp_hdr, hlen);
652 fh->nexthdr = nexthdr;
654 fh->frag_off = htons(IP6_MF);
655 fh->identification = frag_id;
657 first_len = skb_pagelen(skb);
658 skb->data_len = first_len - skb_headlen(skb);
659 skb->len = first_len;
660 ipv6_hdr(skb)->payload_len = htons(first_len -
661 sizeof(struct ipv6hdr));
666 /* Prepare the header of the next frame,
667 * before the previous one goes down. */
669 frag->ip_summed = CHECKSUM_NONE;
670 skb_reset_transport_header(frag);
671 fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
672 __skb_push(frag, hlen);
673 skb_reset_network_header(frag);
674 memcpy(skb_network_header(frag), tmp_hdr,
676 offset += skb->len - hlen - sizeof(struct frag_hdr);
677 fh->nexthdr = nexthdr;
679 fh->frag_off = htons(offset);
681 fh->frag_off |= htons(IP6_MF);
682 fh->identification = frag_id;
683 ipv6_hdr(frag)->payload_len =
685 sizeof(struct ipv6hdr));
686 ip6_copy_metadata(frag, skb);
689 err = output(net, sk, skb);
691 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
692 IPSTATS_MIB_FRAGCREATES);
705 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
706 IPSTATS_MIB_FRAGOKS);
711 kfree_skb_list(frag);
713 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
714 IPSTATS_MIB_FRAGFAILS);
719 skb_walk_frags(skb, frag2) {
723 frag2->destructor = NULL;
724 skb->truesize += frag2->truesize;
729 if ((skb->ip_summed == CHECKSUM_PARTIAL) &&
730 skb_checksum_help(skb))
733 left = skb->len - hlen; /* Space per frame */
734 ptr = hlen; /* Where to start from */
737 * Fragment the datagram.
740 *prevhdr = NEXTHDR_FRAGMENT;
741 troom = rt->dst.dev->needed_tailroom;
744 * Keep copying data until we run out.
748 /* IF: it doesn't fit, use 'mtu' - the data space left */
751 /* IF: we are not sending up to and including the packet end
752 then align the next start on an eight byte boundary */
757 /* Allocate buffer */
758 frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
759 hroom + troom, GFP_ATOMIC);
761 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
762 IPSTATS_MIB_FRAGFAILS);
768 * Set up data on packet
771 ip6_copy_metadata(frag, skb);
772 skb_reserve(frag, hroom);
773 skb_put(frag, len + hlen + sizeof(struct frag_hdr));
774 skb_reset_network_header(frag);
775 fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
776 frag->transport_header = (frag->network_header + hlen +
777 sizeof(struct frag_hdr));
780 * Charge the memory for the fragment to any owner it might possess
784 skb_set_owner_w(frag, skb->sk);
787 * Copy the packet header into the new buffer.
789 skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);
792 * Build fragment header.
794 fh->nexthdr = nexthdr;
796 fh->identification = frag_id;
799 * Copy a block of the IP datagram.
801 BUG_ON(skb_copy_bits(skb, ptr, skb_transport_header(frag),
805 fh->frag_off = htons(offset);
807 fh->frag_off |= htons(IP6_MF);
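/* The fragment offset field carries the offset in 8-octet units in its
 * upper 13 bits; since 'offset' is always a multiple of 8 here, storing
 * the byte count directly yields the correct encoding, with IP6_MF set
 * on every fragment except the last one. */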
808 ipv6_hdr(frag)->payload_len = htons(frag->len -
809 sizeof(struct ipv6hdr));
815 * Put this fragment into the sending queue.
817 err = output(net, sk, frag);
821 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
822 IPSTATS_MIB_FRAGCREATES);
824 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
825 IPSTATS_MIB_FRAGOKS);
830 if (skb->sk && dst_allfrag(skb_dst(skb)))
831 sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);
833 skb->dev = skb_dst(skb)->dev;
834 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
838 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
839 IPSTATS_MIB_FRAGFAILS);
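/*
 * ip6_rt_check() returns non-zero when the cached route can no longer be
 * trusted for this flow: it returns zero only if the route is a host
 * route (plen == 128) whose key matches the flow address, or if the flow
 * address equals the socket's cached last-used address.
 */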
844 static inline int ip6_rt_check(const struct rt6key *rt_key,
845 const struct in6_addr *fl_addr,
846 const struct in6_addr *addr_cache)
848 return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
849 (!addr_cache || !ipv6_addr_equal(fl_addr, addr_cache));
852 static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
853 struct dst_entry *dst,
854 const struct flowi6 *fl6)
856 struct ipv6_pinfo *np = inet6_sk(sk);
862 if (dst->ops->family != AF_INET6) {
867 rt = (struct rt6_info *)dst;
868 /* Yes, checking route validity in the not-connected
869 * case is not very simple. Take into account
870 * that we do not support routing by source, TOS,
871 * and MSG_DONTROUTE --ANK (980726)
873 * 1. ip6_rt_check(): If the route was a host route,
874 * check that the cached destination is current.
875 * If it is a network route, we still may
876 * check its validity using a saved pointer
877 * to the last used address: daddr_cache.
878 * We do not want to save the whole address now
879 * (because the main consumer of this service
880 * is TCP, which does not have this problem),
881 * so this last trick works only on connected sockets.
883 * 2. oif also should be the same.
885 if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
886 #ifdef CONFIG_IPV6_SUBTREES
887 ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
889 (!(fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) &&
890 (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex))) {
899 static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
900 struct dst_entry **dst, struct flowi6 *fl6)
902 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
908 /* The correct way to handle this would be to do
909 * ip6_route_get_saddr, and then ip6_route_output; however,
910 * the route-specific preferred source forces the
911 * ip6_route_output call _before_ ip6_route_get_saddr.
913 * In source specific routing (no src=any default route),
914 * ip6_route_output will fail given src=any saddr, though, so
915 * that's why we try it again later.
917 if (ipv6_addr_any(&fl6->saddr) && (!*dst || !(*dst)->error)) {
919 bool had_dst = *dst != NULL;
922 *dst = ip6_route_output(net, sk, fl6);
923 rt = (*dst)->error ? NULL : (struct rt6_info *)*dst;
924 err = ip6_route_get_saddr(net, rt, &fl6->daddr,
925 sk ? inet6_sk(sk)->srcprefs : 0,
928 goto out_err_release;
930 /* If we had an erroneous initial result, pretend it
931 * never existed and let the SA-enabled version take over.
934 if (!had_dst && (*dst)->error) {
941 *dst = ip6_route_output(net, sk, fl6);
945 goto out_err_release;
947 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
949 * Here, if the dst entry we've looked up
950 * has a neighbour entry that is in the INCOMPLETE
951 * state and the source address from the flow is
952 * marked as OPTIMISTIC, we release the found
953 * dst entry and replace it with the
954 * dst entry of the nexthop router.
956 rt = (struct rt6_info *) *dst;
958 n = __ipv6_neigh_lookup_noref(rt->dst.dev,
959 rt6_nexthop(rt, &fl6->daddr));
960 err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
961 rcu_read_unlock_bh();
964 struct inet6_ifaddr *ifp;
965 struct flowi6 fl_gw6;
968 ifp = ipv6_get_ifaddr(net, &fl6->saddr,
971 redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
977 * We need to get the dst entry for the
978 * default router instead
981 memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
982 memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
983 *dst = ip6_route_output(net, sk, &fl_gw6);
986 goto out_err_release;
994 if (err == -ENETUNREACH)
995 IP6_INC_STATS(net, NULL, IPSTATS_MIB_OUTNOROUTES);
1002 * ip6_dst_lookup - perform route lookup on flow
* @net: network namespace to perform the lookup in
1003 * @sk: socket which provides route info
1004 * @dst: pointer to dst_entry * for result
1005 * @fl6: flow to lookup
1007 * This function performs a route lookup on the given flow.
1009 * It returns zero on success, or a standard errno code on error.
1011 int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
1015 return ip6_dst_lookup_tail(net, sk, dst, fl6);
1017 EXPORT_SYMBOL_GPL(ip6_dst_lookup);
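/*
 * Illustrative sketch, not taken from a real caller: a minimal use of
 * ip6_dst_lookup().  Only the flow fields relevant for routing need to
 * be filled in; the caller owns the returned dst reference.  The helper
 * name and the choice of IPPROTO_UDP are hypothetical.
 */
static int __maybe_unused example_route_lookup(struct net *net, struct sock *sk,
					       const struct in6_addr *daddr)
{
	struct dst_entry *dst = NULL;
	struct flowi6 fl6;
	int err;

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = *daddr;
	fl6.flowi6_proto = IPPROTO_UDP;

	err = ip6_dst_lookup(net, sk, &dst, &fl6);
	if (err)
		return err;

	/* route found: dst->dev, dst_mtu(dst), etc. are now usable */

	dst_release(dst);
	return 0;
}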
1020 * ip6_dst_lookup_flow - perform route lookup on flow with ipsec
1021 * @sk: socket which provides route info
1022 * @fl6: flow to lookup
1023 * @final_dst: final destination address for ipsec lookup
1025 * This function performs a route lookup on the given flow.
1027 * It returns a valid dst pointer on success, or a pointer-encoded error code.
1030 struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
1031 const struct in6_addr *final_dst)
1033 struct dst_entry *dst = NULL;
1036 err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
1038 return ERR_PTR(err);
1040 fl6->daddr = *final_dst;
1041 if (!fl6->flowi6_oif)
1042 fl6->flowi6_oif = l3mdev_fib_oif(dst->dev);
1044 return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
1046 EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
1049 * ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
1050 * @sk: socket which provides the dst cache and route info
1051 * @fl6: flow to lookup
1052 * @final_dst: final destination address for ipsec lookup
1054 * This function performs a route lookup on the given flow with the
1055 * possibility of using the cached route in the socket if it is valid.
1056 * It will take the socket dst lock when operating on the dst cache.
1057 * As a result, this function can only be used in process context.
1059 * It returns a valid dst pointer on success, or a pointer-encoded error code.
1062 struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
1063 const struct in6_addr *final_dst)
1065 struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
1068 dst = ip6_sk_dst_check(sk, dst, fl6);
1070 err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
1072 return ERR_PTR(err);
1074 fl6->daddr = *final_dst;
1076 return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
1078 EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
1080 static inline int ip6_ufo_append_data(struct sock *sk,
1081 struct sk_buff_head *queue,
1082 int getfrag(void *from, char *to, int offset, int len,
1083 int odd, struct sk_buff *skb),
1084 void *from, int length, int hh_len, int fragheaderlen,
1085 int transhdrlen, int mtu, unsigned int flags,
1086 const struct flowi6 *fl6)
1089 struct sk_buff *skb;
1092 /* There is support for UDP large send offload by network
1093 * device, so create one single skb packet containing the complete UDP datagram. */
1096 skb = skb_peek_tail(queue);
1098 skb = sock_alloc_send_skb(sk,
1099 hh_len + fragheaderlen + transhdrlen + 20,
1100 (flags & MSG_DONTWAIT), &err);
1104 /* reserve space for Hardware header */
1105 skb_reserve(skb, hh_len);
1107 /* create space for UDP/IP header */
1108 skb_put(skb, fragheaderlen + transhdrlen);
1110 /* initialize network header pointer */
1111 skb_reset_network_header(skb);
1113 /* initialize protocol header pointer */
1114 skb->transport_header = skb->network_header + fragheaderlen;
1116 skb->protocol = htons(ETH_P_IPV6);
1119 __skb_queue_tail(queue, skb);
1120 } else if (skb_is_gso(skb)) {
1124 skb->ip_summed = CHECKSUM_PARTIAL;
1125 /* Specify the length of each IPv6 datagram fragment.
1126 * It has to be a multiple of 8.
1128 skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
1129 sizeof(struct frag_hdr)) & ~7;
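/* e.g. with a 1500 byte MTU and a bare 40 byte IPv6 header
 * (fragheaderlen == 40): (1500 - 40 - 8) & ~7 == 1448, i.e. 1448
 * payload bytes per on-wire fragment emitted by the device */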
1130 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1131 skb_shinfo(skb)->ip6_frag_id = ipv6_select_ident(sock_net(sk),
1136 return skb_append_datato_frags(sk, skb, getfrag, from,
1137 (length - transhdrlen));
1140 static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
1143 return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
1146 static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
1149 return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
1152 static void ip6_append_data_mtu(unsigned int *mtu,
1154 unsigned int fragheaderlen,
1155 struct sk_buff *skb,
1156 struct rt6_info *rt,
1157 unsigned int orig_mtu)
1159 if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
1161 /* first fragment, reserve header_len */
1162 *mtu = orig_mtu - rt->dst.header_len;
1166 * this fragment is not the first one, so the header
1167 * space is regarded as data space.
1171 *maxfraglen = ((*mtu - fragheaderlen) & ~7)
1172 + fragheaderlen - sizeof(struct frag_hdr);
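/* Worked example: mtu = 1500 and fragheaderlen = 40 (bare IPv6 header)
 * give ((1500 - 40) & ~7) + 40 - 8 = 1488, i.e. each queued skb may
 * hold 40 bytes of headers plus 1448 bytes of data, leaving room for
 * the 8-byte fragment header that ip6_fragment() inserts later while
 * keeping the fragmentable part 8-byte aligned. */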
1176 static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
1177 struct inet6_cork *v6_cork,
1178 int hlimit, int tclass, struct ipv6_txoptions *opt,
1179 struct rt6_info *rt, struct flowi6 *fl6)
1181 struct ipv6_pinfo *np = inet6_sk(sk);
1188 if (WARN_ON(v6_cork->opt))
1191 v6_cork->opt = kzalloc(opt->tot_len, sk->sk_allocation);
1192 if (unlikely(!v6_cork->opt))
1195 v6_cork->opt->tot_len = opt->tot_len;
1196 v6_cork->opt->opt_flen = opt->opt_flen;
1197 v6_cork->opt->opt_nflen = opt->opt_nflen;
1199 v6_cork->opt->dst0opt = ip6_opt_dup(opt->dst0opt,
1201 if (opt->dst0opt && !v6_cork->opt->dst0opt)
1204 v6_cork->opt->dst1opt = ip6_opt_dup(opt->dst1opt,
1206 if (opt->dst1opt && !v6_cork->opt->dst1opt)
1209 v6_cork->opt->hopopt = ip6_opt_dup(opt->hopopt,
1211 if (opt->hopopt && !v6_cork->opt->hopopt)
1214 v6_cork->opt->srcrt = ip6_rthdr_dup(opt->srcrt,
1216 if (opt->srcrt && !v6_cork->opt->srcrt)
1219 /* need source address above --miyazawa */
1222 cork->base.dst = &rt->dst;
1223 cork->fl.u.ip6 = *fl6;
1224 v6_cork->hop_limit = hlimit;
1225 v6_cork->tclass = tclass;
1226 if (rt->dst.flags & DST_XFRM_TUNNEL)
1227 mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
1228 rt->dst.dev->mtu : dst_mtu(&rt->dst);
1230 mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
1231 rt->dst.dev->mtu : dst_mtu(rt->dst.path);
1232 if (np->frag_size < mtu) {
1234 mtu = np->frag_size;
1236 cork->base.fragsize = mtu;
1237 if (dst_allfrag(rt->dst.path))
1238 cork->base.flags |= IPCORK_ALLFRAG;
1239 cork->base.length = 0;
1244 static int __ip6_append_data(struct sock *sk,
1246 struct sk_buff_head *queue,
1247 struct inet_cork *cork,
1248 struct inet6_cork *v6_cork,
1249 struct page_frag *pfrag,
1250 int getfrag(void *from, char *to, int offset,
1251 int len, int odd, struct sk_buff *skb),
1252 void *from, int length, int transhdrlen,
1253 unsigned int flags, int dontfrag)
1255 struct sk_buff *skb, *skb_prev = NULL;
1256 unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
1258 int dst_exthdrlen = 0;
1265 struct rt6_info *rt = (struct rt6_info *)cork->dst;
1266 struct ipv6_txoptions *opt = v6_cork->opt;
1267 int csummode = CHECKSUM_NONE;
1269 skb = skb_peek_tail(queue);
1271 exthdrlen = opt ? opt->opt_flen : 0;
1272 dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
1275 mtu = cork->fragsize;
1278 hh_len = LL_RESERVED_SPACE(rt->dst.dev);
1280 fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
1281 (opt ? opt->opt_nflen : 0);
1282 maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
1283 sizeof(struct frag_hdr);
1285 if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
1286 unsigned int maxnonfragsize, headersize;
1288 headersize = sizeof(struct ipv6hdr) +
1289 (opt ? opt->opt_flen + opt->opt_nflen : 0) +
1290 (dst_allfrag(&rt->dst) ?
1291 sizeof(struct frag_hdr) : 0) +
1292 rt->rt6i_nfheader_len;
1294 if (ip6_sk_ignore_df(sk))
1295 maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
1297 maxnonfragsize = mtu;
1299 /* dontfrag active */
1300 if ((cork->length + length > mtu - headersize) && dontfrag &&
1301 (sk->sk_protocol == IPPROTO_UDP ||
1302 sk->sk_protocol == IPPROTO_RAW)) {
1303 ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
1304 sizeof(struct ipv6hdr));
1308 if (cork->length + length > maxnonfragsize - headersize) {
1310 ipv6_local_error(sk, EMSGSIZE, fl6,
1312 sizeof(struct ipv6hdr));
1317 if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_RAW) {
1318 sock_tx_timestamp(sk, &tx_flags);
1319 if (tx_flags & SKBTX_ANY_SW_TSTAMP &&
1320 sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
1321 tskey = sk->sk_tskey++;
1324 /* If this is the first and only packet and device
1325 * supports checksum offloading, let's use it.
1326 * Use transhdrlen, same as IPv4, because partial
1327 * sums only work when transhdrlen is set.
1329 if (transhdrlen && sk->sk_protocol == IPPROTO_UDP &&
1330 length + fragheaderlen < mtu &&
1331 rt->dst.dev->features & NETIF_F_V6_CSUM &&
1333 csummode = CHECKSUM_PARTIAL;
1335 * Let's try using as much space as possible.
1336 * Use MTU if total length of the message fits into the MTU.
1337 * Otherwise, we need to reserve the fragment header and
1338 * fragment alignment (= 8-15 octets, in total).
1340 * Note that we may need to "move" the data from the tail
1341 * of the buffer to the new fragment when we split the message.
1344 * FIXME: It may be fragmented into multiple chunks
1345 * at once if non-fragmentable extension headers are too large.
1350 cork->length += length;
1351 if (((length > mtu) ||
1352 (skb && skb_is_gso(skb))) &&
1353 (sk->sk_protocol == IPPROTO_UDP) &&
1354 (rt->dst.dev->features & NETIF_F_UFO) &&
1355 (sk->sk_type == SOCK_DGRAM)) {
1356 err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
1357 hh_len, fragheaderlen,
1358 transhdrlen, mtu, flags, fl6);
1367 while (length > 0) {
1368 /* Check if the remaining data fits into current packet. */
1369 copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
1371 copy = maxfraglen - skb->len;
1375 unsigned int datalen;
1376 unsigned int fraglen;
1377 unsigned int fraggap;
1378 unsigned int alloclen;
1380 /* There's no room in the current skb */
1382 fraggap = skb->len - maxfraglen;
1385 /* update mtu and maxfraglen if necessary */
1386 if (!skb || !skb_prev)
1387 ip6_append_data_mtu(&mtu, &maxfraglen,
1388 fragheaderlen, skb, rt,
1394 * If remaining data exceeds the mtu,
1395 * we know we need more fragment(s).
1397 datalen = length + fraggap;
1399 if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
1400 datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
1401 if ((flags & MSG_MORE) &&
1402 !(rt->dst.dev->features&NETIF_F_SG))
1405 alloclen = datalen + fragheaderlen;
1407 alloclen += dst_exthdrlen;
1409 if (datalen != length + fraggap) {
1411 * this is not the last fragment, the trailer
1412 * space is regarded as data space.
1414 datalen += rt->dst.trailer_len;
1417 alloclen += rt->dst.trailer_len;
1418 fraglen = datalen + fragheaderlen;
1421 * We just reserve space for the fragment header.
1422 * Note: this may be an overallocation if the message
1423 * (without MSG_MORE) fits into the MTU.
1425 alloclen += sizeof(struct frag_hdr);
1428 skb = sock_alloc_send_skb(sk,
1430 (flags & MSG_DONTWAIT), &err);
1433 if (atomic_read(&sk->sk_wmem_alloc) <=
1435 skb = sock_wmalloc(sk,
1436 alloclen + hh_len, 1,
1444 * Fill in the control structures
1446 skb->protocol = htons(ETH_P_IPV6);
1447 skb->ip_summed = csummode;
1449 /* reserve for fragmentation and ipsec header */
1450 skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
1453 /* Only the initial fragment is time stamped */
1454 skb_shinfo(skb)->tx_flags = tx_flags;
1456 skb_shinfo(skb)->tskey = tskey;
1460 * Find where to start putting bytes
1462 data = skb_put(skb, fraglen);
1463 skb_set_network_header(skb, exthdrlen);
1464 data += fragheaderlen;
1465 skb->transport_header = (skb->network_header +
1468 skb->csum = skb_copy_and_csum_bits(
1469 skb_prev, maxfraglen,
1470 data + transhdrlen, fraggap, 0);
1471 skb_prev->csum = csum_sub(skb_prev->csum,
1474 pskb_trim_unique(skb_prev, maxfraglen);
1476 copy = datalen - transhdrlen - fraggap;
1482 } else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
1489 length -= datalen - fraggap;
1495 * Put the packet on the pending queue
1497 __skb_queue_tail(queue, skb);
1504 if (!(rt->dst.dev->features&NETIF_F_SG)) {
1508 if (getfrag(from, skb_put(skb, copy),
1509 offset, copy, off, skb) < 0) {
1510 __skb_trim(skb, off);
1515 int i = skb_shinfo(skb)->nr_frags;
1518 if (!sk_page_frag_refill(sk, pfrag))
1521 if (!skb_can_coalesce(skb, i, pfrag->page,
1524 if (i == MAX_SKB_FRAGS)
1527 __skb_fill_page_desc(skb, i, pfrag->page,
1529 skb_shinfo(skb)->nr_frags = ++i;
1530 get_page(pfrag->page);
1532 copy = min_t(int, copy, pfrag->size - pfrag->offset);
1534 page_address(pfrag->page) + pfrag->offset,
1535 offset, copy, skb->len, skb) < 0)
1538 pfrag->offset += copy;
1539 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1541 skb->data_len += copy;
1542 skb->truesize += copy;
1543 atomic_add(copy, &sk->sk_wmem_alloc);
1554 cork->length -= length;
1555 IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
1559 int ip6_append_data(struct sock *sk,
1560 int getfrag(void *from, char *to, int offset, int len,
1561 int odd, struct sk_buff *skb),
1562 void *from, int length, int transhdrlen, int hlimit,
1563 int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
1564 struct rt6_info *rt, unsigned int flags, int dontfrag)
1566 struct inet_sock *inet = inet_sk(sk);
1567 struct ipv6_pinfo *np = inet6_sk(sk);
1571 if (flags&MSG_PROBE)
1573 if (skb_queue_empty(&sk->sk_write_queue)) {
1577 err = ip6_setup_cork(sk, &inet->cork, &np->cork, hlimit,
1578 tclass, opt, rt, fl6);
1582 exthdrlen = (opt ? opt->opt_flen : 0);
1583 length += exthdrlen;
1584 transhdrlen += exthdrlen;
1586 fl6 = &inet->cork.fl.u.ip6;
1590 return __ip6_append_data(sk, fl6, &sk->sk_write_queue, &inet->cork.base,
1591 &np->cork, sk_page_frag(sk), getfrag,
1592 from, length, transhdrlen, flags, dontfrag);
1594 EXPORT_SYMBOL_GPL(ip6_append_data);
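/*
 * Illustrative sketch, not copied from a real protocol: the usual
 * corking sequence built on top of ip6_append_data().  Payload is
 * appended under the socket lock (possibly over several calls when
 * MSG_MORE is used) and the pending queue is then pushed or flushed.
 * transhdrlen is 0 here, raw-socket style, i.e. the payload already
 * contains any transport header; the helper name is hypothetical.
 */
static int __maybe_unused example_corked_send(struct sock *sk, struct msghdr *msg,
					      size_t len, int hlimit, int tclass,
					      struct ipv6_txoptions *opt,
					      struct flowi6 *fl6, struct rt6_info *rt)
{
	int err;

	lock_sock(sk);
	err = ip6_append_data(sk, ip_generic_getfrag, msg, len, 0,
			      hlimit, tclass, opt, fl6, rt,
			      msg->msg_flags, inet6_sk(sk)->dontfrag);
	if (err)
		ip6_flush_pending_frames(sk);
	else if (!(msg->msg_flags & MSG_MORE))
		err = ip6_push_pending_frames(sk);
	release_sock(sk);

	return err;
}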
1596 static void ip6_cork_release(struct inet_cork_full *cork,
1597 struct inet6_cork *v6_cork)
1600 kfree(v6_cork->opt->dst0opt);
1601 kfree(v6_cork->opt->dst1opt);
1602 kfree(v6_cork->opt->hopopt);
1603 kfree(v6_cork->opt->srcrt);
1604 kfree(v6_cork->opt);
1605 v6_cork->opt = NULL;
1608 if (cork->base.dst) {
1609 dst_release(cork->base.dst);
1610 cork->base.dst = NULL;
1611 cork->base.flags &= ~IPCORK_ALLFRAG;
1613 memset(&cork->fl, 0, sizeof(cork->fl));
1616 struct sk_buff *__ip6_make_skb(struct sock *sk,
1617 struct sk_buff_head *queue,
1618 struct inet_cork_full *cork,
1619 struct inet6_cork *v6_cork)
1621 struct sk_buff *skb, *tmp_skb;
1622 struct sk_buff **tail_skb;
1623 struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
1624 struct ipv6_pinfo *np = inet6_sk(sk);
1625 struct net *net = sock_net(sk);
1626 struct ipv6hdr *hdr;
1627 struct ipv6_txoptions *opt = v6_cork->opt;
1628 struct rt6_info *rt = (struct rt6_info *)cork->base.dst;
1629 struct flowi6 *fl6 = &cork->fl.u.ip6;
1630 unsigned char proto = fl6->flowi6_proto;
1632 skb = __skb_dequeue(queue);
1635 tail_skb = &(skb_shinfo(skb)->frag_list);
1637 /* move skb->data to ip header from ext header */
1638 if (skb->data < skb_network_header(skb))
1639 __skb_pull(skb, skb_network_offset(skb));
1640 while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
1641 __skb_pull(tmp_skb, skb_network_header_len(skb));
1642 *tail_skb = tmp_skb;
1643 tail_skb = &(tmp_skb->next);
1644 skb->len += tmp_skb->len;
1645 skb->data_len += tmp_skb->len;
1646 skb->truesize += tmp_skb->truesize;
1647 tmp_skb->destructor = NULL;
1651 /* Allow local fragmentation. */
1652 skb->ignore_df = ip6_sk_ignore_df(sk);
1654 *final_dst = fl6->daddr;
1655 __skb_pull(skb, skb_network_header_len(skb));
1656 if (opt && opt->opt_flen)
1657 ipv6_push_frag_opts(skb, opt, &proto);
1658 if (opt && opt->opt_nflen)
1659 ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);
1661 skb_push(skb, sizeof(struct ipv6hdr));
1662 skb_reset_network_header(skb);
1663 hdr = ipv6_hdr(skb);
1665 ip6_flow_hdr(hdr, v6_cork->tclass,
1666 ip6_make_flowlabel(net, skb, fl6->flowlabel,
1667 np->autoflowlabel, fl6));
1668 hdr->hop_limit = v6_cork->hop_limit;
1669 hdr->nexthdr = proto;
1670 hdr->saddr = fl6->saddr;
1671 hdr->daddr = *final_dst;
1673 skb->priority = sk->sk_priority;
1674 skb->mark = sk->sk_mark;
1676 skb_dst_set(skb, dst_clone(&rt->dst));
1677 IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
1678 if (proto == IPPROTO_ICMPV6) {
1679 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
1681 ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
1682 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1685 ip6_cork_release(cork, v6_cork);
1690 int ip6_send_skb(struct sk_buff *skb)
1692 struct net *net = sock_net(skb->sk);
1693 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
1696 err = ip6_local_out(net, skb->sk, skb);
1699 err = net_xmit_errno(err);
1701 IP6_INC_STATS(net, rt->rt6i_idev,
1702 IPSTATS_MIB_OUTDISCARDS);
1708 int ip6_push_pending_frames(struct sock *sk)
1710 struct sk_buff *skb;
1712 skb = ip6_finish_skb(sk);
1716 return ip6_send_skb(skb);
1718 EXPORT_SYMBOL_GPL(ip6_push_pending_frames);
1720 static void __ip6_flush_pending_frames(struct sock *sk,
1721 struct sk_buff_head *queue,
1722 struct inet_cork_full *cork,
1723 struct inet6_cork *v6_cork)
1725 struct sk_buff *skb;
1727 while ((skb = __skb_dequeue_tail(queue)) != NULL) {
1729 IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
1730 IPSTATS_MIB_OUTDISCARDS);
1734 ip6_cork_release(cork, v6_cork);
1737 void ip6_flush_pending_frames(struct sock *sk)
1739 __ip6_flush_pending_frames(sk, &sk->sk_write_queue,
1740 &inet_sk(sk)->cork, &inet6_sk(sk)->cork);
1742 EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);
1744 struct sk_buff *ip6_make_skb(struct sock *sk,
1745 int getfrag(void *from, char *to, int offset,
1746 int len, int odd, struct sk_buff *skb),
1747 void *from, int length, int transhdrlen,
1748 int hlimit, int tclass,
1749 struct ipv6_txoptions *opt, struct flowi6 *fl6,
1750 struct rt6_info *rt, unsigned int flags,
1753 struct inet_cork_full cork;
1754 struct inet6_cork v6_cork;
1755 struct sk_buff_head queue;
1756 int exthdrlen = (opt ? opt->opt_flen : 0);
1759 if (flags & MSG_PROBE)
1762 __skb_queue_head_init(&queue);
1764 cork.base.flags = 0;
1766 cork.base.opt = NULL;
1768 err = ip6_setup_cork(sk, &cork, &v6_cork, hlimit, tclass, opt, rt, fl6);
1770 return ERR_PTR(err);
1773 dontfrag = inet6_sk(sk)->dontfrag;
1775 err = __ip6_append_data(sk, fl6, &queue, &cork.base, &v6_cork,
1776 &current->task_frag, getfrag, from,
1777 length + exthdrlen, transhdrlen + exthdrlen,
1780 __ip6_flush_pending_frames(sk, &queue, &cork, &v6_cork);
1781 return ERR_PTR(err);
1784 return __ip6_make_skb(sk, &queue, &cork, &v6_cork);
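/*
 * ip6_make_skb() is the counterpart of the cork/append/push sequence
 * above: it assembles the whole datagram on a caller-local queue with a
 * private cork, so it does not depend on the socket's pending-frames
 * state.  A caller then sends it along these lines (sketch only; a real
 * caller fills in its transport header first):
 *
 *	skb = ip6_make_skb(sk, getfrag, from, len, transhdrlen, hlimit,
 *			   tclass, opt, fl6, rt, flags, dontfrag);
 *	if (!IS_ERR_OR_NULL(skb))
 *		err = ip6_send_skb(skb);
 */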