/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetics in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *	H. von Brand	:	Added missing #include <linux/string.h>
 *	Imran Patel	:	frag id should be in NBO
 *	Kazunori MIYAZAWA @USAGI
 *			:	add ip6_append_data and related functions
 *				for datagram xmit
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>
#include <linux/mroute6.h>
int __ip6_local_out(struct sk_buff *skb)
{
	int len;

	len = skb->len - sizeof(struct ipv6hdr);
	if (len > IPV6_MAXPLEN)
		len = 0;
	ipv6_hdr(skb)->payload_len = htons(len);

	return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
		       skb_dst(skb)->dev, dst_output);
}
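/*
 * Note (illustrative): payload_len counts everything after the fixed
 * 40-byte IPv6 header, so a 1280-byte packet gets payload_len 1240.
 * A payload larger than IPV6_MAXPLEN (65535) is encoded as 0, the
 * value reserved for jumbograms carrying a Jumbo Payload option.
 */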
int ip6_local_out(struct sk_buff *skb)
{
	int err;

	err = __ip6_local_out(skb);
	if (likely(err == 1))
		err = dst_output(skb);

	return err;
}
EXPORT_SYMBOL_GPL(ip6_local_out);
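/*
 * Note: nf_hook() returns 1 when the NF_INET_LOCAL_OUT verdict is
 * ACCEPT, so ip6_local_out() only continues to dst_output() in that
 * case; any other verdict (dropped, stolen, queued) is returned as-is.
 */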
static int ip6_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct neighbour *neigh;
	struct in6_addr *nexthop;
	int ret;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) &&
		    ((mroute6_socket(dev_net(dev), skb) &&
		     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
		     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
					 &ipv6_hdr(skb)->saddr))) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			   is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
					newskb, NULL, newskb->dev,
					dev_loopback_xmit);

			if (ipv6_hdr(skb)->hop_limit == 0) {
				IP6_INC_STATS(dev_net(dev), idev,
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST,
				 skb->len);

		if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
		    IPV6_ADDR_SCOPE_NODELOCAL &&
		    !(dev->flags & IFF_LOOPBACK)) {
			kfree_skb(skb);
			return 0;
		}
	}

	rcu_read_lock_bh();
	nexthop = rt6_nexthop((struct rt6_info *)dst);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		ret = dst_neigh_output(dst, neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(dev_net(dst->dev),
		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}
static int ip6_finish_output(struct sk_buff *skb)
{
	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
	    dst_allfrag(skb_dst(skb)) ||
	    (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
		return ip6_fragment(skb, ip6_finish_output2);
	else
		return ip6_finish_output2(skb);
}
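/*
 * Example (illustrative): a 3000-byte non-GSO packet on a 1500-byte-MTU
 * route takes the ip6_fragment() path, while a GSO skb of the same total
 * length passes through for segmentation at a lower layer.  dst_allfrag()
 * covers destinations that advertised an MTU below IPV6_MIN_MTU, where a
 * fragment header is included in every packet sent to them.
 */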
int ip6_output(struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;
	struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
	if (unlikely(idev->cnf.disable_ipv6)) {
		IP6_INC_STATS(dev_net(dev), idev,
			      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
		return 0;
	}

	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, dev,
			    ip6_finish_output,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
/*
 *	xmit an sk_buff (used by TCP, SCTP and DCCP)
 */

int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
	     struct ipv6_txoptions *opt, int tclass)
{
	struct net *net = sock_net(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl6->daddr;
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr;
	u8  proto = fl6->flowi6_proto;
	int seg_len = skb->len;
	int hlimit = -1;
	u32 mtu;

	if (opt) {
		unsigned int head_room;

		/* First: exthdrs may take lots of space (~8K for now)
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			if (skb2 == NULL) {
				IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return -ENOBUFS;
			}
			consume_skb(skb);
			skb = skb2;
			skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	/*
	 *	Fill in the IPv6 header
	 */
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = ip6_dst_hoplimit(dst);

	ip6_flow_hdr(hdr, tclass, fl6->flowlabel);

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	hdr->saddr = fl6->saddr;
	hdr->daddr = *first_hop;

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) {
		IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_OUT, skb->len);
		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
			       dst->dev, dst_output);
	}

	skb->dev = dst->dev;
	ipv6_local_error(sk, EMSGSIZE, fl6, mtu);
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}

EXPORT_SYMBOL(ip6_xmit);
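/*
 * Typical use (illustrative sketch, not part of this file): a
 * connection-oriented caller such as TCP builds its transport header,
 * attaches a route with skb_dst_set(), and then hands the skb over:
 *
 *	err = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
 *
 * where fl6 is the caller's own flowi6 and np = inet6_sk(sk); the exact
 * option and traffic-class sources vary by protocol.
 */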
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}
static int ip6_forward_proxy_check(struct sk_buff *skb)
{
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	u8 nexthdr = hdr->nexthdr;
	__be16 frag_off;
	int offset;

	if (ipv6_ext_hdr(nexthdr)) {
		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
		if (offset < 0)
			return 0;
	} else
		offset = sizeof(struct ipv6hdr);

	if (nexthdr == IPPROTO_ICMPV6) {
		struct icmp6hdr *icmp6;

		if (!pskb_may_pull(skb, (skb_network_header(skb) +
					 offset + 1 - skb->data)))
			return 0;

		icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);

		switch (icmp6->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			/* For reaction involving unicast neighbor discovery
			 * message destined to the proxied address, pass it to
			 * input function.
			 */
			return 1;
		default:
			break;
		}
	}

	/*
	 * The proxying router can't forward traffic sent to a link-local
	 * address, so signal the sender and discard the packet. This
	 * behavior is clarified by the MIPv6 specification.
	 */
	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
		dst_link_failure(skb);
		return -1;
	}

	return 0;
}
static inline int ip6_forward_finish(struct sk_buff *skb)
{
	return dst_output(skb);
}
static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	/* ipv6 conntrack defrag sets max_frag_size + local_df */
	if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
		return true;

	if (skb->local_df)
		return false;

	if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
		return false;

	return true;
}
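/*
 * Note (illustrative): frag_max_size is recorded by the conntrack
 * defragmenter, so a packet reassembled from 1400-byte fragments still
 * counts as "too big" for a 1280-byte-MTU route even though local_df is
 * set; for GSO packets the per-segment on-wire length, not skb->len, is
 * what gets compared against the MTU.
 */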
int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct net *net = dev_net(dst->dev);
	u32 mtu;

	if (net->ipv6.devconf_all->forwarding == 0)
		goto error;

	if (skb_warn_if_lro(skb))
		goto drop;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	skb_forward_csum(skb);

	/*
	 *	We DO NOT make any processing on
	 *	RA packets, pushing them to user level AS IS
	 *	without any warranty that application will be able
	 *	to interpret them. The reason is that we
	 *	cannot make anything clever here.
	 *
	 *	We are not end-node, so that if packet contains
	 *	AH/ESP, we cannot make anything.
	 *	Defragmentation also would be mistake, RA packets
	 *	cannot be fragmented, because there is no warranty
	 *	that different fragments will go along one path. --ANK
	 */
	if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
		if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	/* XXX: idev->cnf.proxy_ndp? */
	if (net->ipv6.devconf_all->proxy_ndp &&
	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
		int proxied = ip6_forward_proxy_check(skb);
		if (proxied > 0)
			return ip6_input(skb);
		else if (proxied < 0) {
			IP6_INC_STATS(net, ip6_dst_idev(dst),
				      IPSTATS_MIB_INDISCARDS);
			goto drop;
		}
	}

	if (!xfrm6_route_forward(skb)) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb_dst(skb);

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	   send redirects to source routed frames.
	   We don't send redirects to frames decapsulated from IPsec.
	 */
	if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
		struct in6_addr *target = NULL;
		struct inet_peer *peer;
		struct rt6_info *rt;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if (rt->rt6i_flags & RTF_GATEWAY)
			target = &rt->rt6i_gateway;
		else
			target = &hdr->daddr;

		peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (inet_peer_xrlim_allow(peer, 1*HZ))
			ndisc_send_redirect(skb, target);
		if (peer)
			inet_putpeer(peer);
	} else {
		int addrtype = ipv6_addr_type(&hdr->saddr);

		/* This check is security critical. */
		if (addrtype == IPV6_ADDR_ANY ||
		    addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
			goto error;
		if (addrtype & IPV6_ADDR_LINKLOCAL) {
			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
				    ICMPV6_NOT_NEIGHBOUR, 0);
			goto error;
		}
	}

	mtu = dst_mtu(dst);
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;

	if (ip6_pkt_too_big(skb, mtu)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = ipv6_hdr(skb);

	/* Mangling hops number delayed to point after skb COW */

	hdr->hop_limit--;

	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
	IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}
static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	skb_dst_drop(to);
	skb_dst_set(to, dst_clone(skb_dst(from)));
	to->dev = from->dev;
	to->mark = from->mark;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
	to->nf_trace = from->nf_trace;
#endif
	skb_copy_secmark(to, from);
}
int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	int hroom, troom;
	__be32 frag_id = 0;
	int ptr, offset = 0, err = 0;
	u8 *prevhdr, nexthdr = 0;
	struct net *net = dev_net(skb_dst(skb)->dev);

	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = ip6_skb_dst_mtu(skb);

	/* We must not fragment if the socket is set to force MTU discovery
	 * or if the skb is not generated by a local socket.
	 */
	if (unlikely(!skb->local_df && skb->len > mtu) ||
		     (IP6CB(skb)->frag_max_size &&
		      IP6CB(skb)->frag_max_size > mtu)) {
		if (skb->sk && dst_allfrag(skb_dst(skb)))
			sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);

		skb->dev = skb_dst(skb)->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	mtu -= hlen + sizeof(struct frag_hdr);

	if (skb_has_frag_list(skb)) {
		int first_len = skb_pagelen(skb);
		struct sk_buff *frag2;

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		/* BUILD HEADER */

		*prevhdr = NEXTHDR_FRAGMENT;
		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		__skb_pull(skb, hlen);
		fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
		__skb_push(skb, hlen);
		skb_reset_network_header(skb);
		memcpy(skb_network_header(skb), tmp_hdr, hlen);

		ipv6_select_ident(fh, rt);
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		ipv6_hdr(skb)->payload_len = htons(first_len -
						   sizeof(struct ipv6hdr));

		dst_hold(&rt->dst);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), tmp_hdr,
				       hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next != NULL)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				ipv6_hdr(frag)->payload_len =
						htons(frag->len -
						      sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(skb);
			if (!err)
				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
					      IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
				      IPSTATS_MIB_FRAGOKS);
			ip6_rt_put(rt);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}

		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
			      IPSTATS_MIB_FRAGFAILS);
		ip6_rt_put(rt);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	if ((skb->ip_summed == CHECKSUM_PARTIAL) &&
	    skb_checksum_help(skb))
		goto fail;

	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;
	hroom = LL_RESERVED_SPACE(rt->dst.dev);
	troom = rt->dst.dev->needed_tailroom;

	/*
	 *	Keep copying data until we run out.
	 */
	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)
			len &= ~7;
		/*
		 *	Allocate buffer.
		 */

		if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
				      hroom + troom, GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, hroom);
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		skb_reset_network_header(frag);
		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
		frag->transport_header = (frag->network_header + hlen +
					  sizeof(struct frag_hdr));

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		if (!frag_id) {
			ipv6_select_ident(fh, rt);
			frag_id = fh->identification;
		} else
			fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len))
			BUG();
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		ipv6_hdr(frag)->payload_len = htons(frag->len -
						    sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(frag);
		if (err)
			goto fail;

		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGOKS);
	consume_skb(skb);
	return err;

fail:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}
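/*
 * Fragment sizing example (illustrative): with a 1500-byte device MTU
 * and a 40-byte unfragmentable part, each fragment may carry
 * 1500 - 40 - 8 = 1452 bytes of fragmentable data, rounded down to 1448
 * by the "& ~7" alignment.  Storing the byte offset directly in
 * fh->frag_off works because offsets are always multiples of 8, so the
 * byte count equals the 13-bit unit count shifted into place above the
 * reserved and IP6_MF bits.
 */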
static inline int ip6_rt_check(const struct rt6key *rt_key,
			       const struct in6_addr *fl_addr,
			       const struct in6_addr *addr_cache)
{
	return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
		(addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache));
}
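/*
 * ip6_rt_check() returns nonzero when the cached route can no longer be
 * trusted for fl_addr: the route is neither a /128 host route to that
 * exact address nor vouched for by the saved last-used address
 * (addr_cache).
 */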
static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
					  struct dst_entry *dst,
					  const struct flowi6 *fl6)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct rt6_info *rt;

	if (!dst)
		goto out;

	if (dst->ops->family != AF_INET6) {
		dst_release(dst);
		return NULL;
	}

	rt = (struct rt6_info *)dst;
	/* Yes, checking route validity in not connected
	 * case is not very simple. Take into account,
	 * that we do not support routing by source, TOS,
	 * and MSG_DONTROUTE		--ANK (980726)
	 *
	 * 1. ip6_rt_check(): If route was host route,
	 *    check that cached destination is current.
	 *    If it is network route, we still may
	 *    check its validity using saved pointer
	 *    to the last used address: daddr_cache.
	 *    We do not want to save whole address now,
	 *    (because main consumer of this service
	 *    is tcp, which has not this problem),
	 *    so that the last trick works only on connected
	 *    sockets.
	 * 2. oif also should be the same.
	 */
	if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
	    ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
#endif
	    (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
		dst_release(dst);
		dst = NULL;
	}

out:
	return dst;
}
static int ip6_dst_lookup_tail(struct sock *sk,
			       struct dst_entry **dst, struct flowi6 *fl6)
{
	struct net *net = sock_net(sk);
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	struct neighbour *n;
	struct rt6_info *rt;
#endif
	int err;

	if (*dst == NULL)
		*dst = ip6_route_output(net, sk, fl6);

	if ((err = (*dst)->error))
		goto out_err_release;

	if (ipv6_addr_any(&fl6->saddr)) {
		struct rt6_info *rt = (struct rt6_info *) *dst;
		err = ip6_route_get_saddr(net, rt, &fl6->daddr,
					  sk ? inet6_sk(sk)->srcprefs : 0,
					  &fl6->saddr);
		if (err)
			goto out_err_release;
	}

#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	/*
	 * Here if the dst entry we've looked up
	 * has a neighbour entry that is in the INCOMPLETE
	 * state and the src address from the flow is
	 * marked as OPTIMISTIC, we release the found
	 * dst entry and replace it instead with the
	 * dst entry of the nexthop router
	 */
	rt = (struct rt6_info *) *dst;
	rcu_read_lock_bh();
	n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt));
	err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
	rcu_read_unlock_bh();

	if (err) {
		struct inet6_ifaddr *ifp;
		struct flowi6 fl_gw6;
		int redirect;

		ifp = ipv6_get_ifaddr(net, &fl6->saddr,
				      (*dst)->dev, 1);

		redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
		if (ifp)
			in6_ifa_put(ifp);

		if (redirect) {
			/*
			 * We need to get the dst entry for the
			 * default router instead
			 */
			dst_release(*dst);
			memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
			memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
			*dst = ip6_route_output(net, sk, &fl_gw6);
			if ((err = (*dst)->error))
				goto out_err_release;
		}
	}
#endif

	return 0;

out_err_release:
	if (err == -ENETUNREACH)
		IP6_INC_STATS_BH(net, NULL, IPSTATS_MIB_OUTNOROUTES);
	dst_release(*dst);
	*dst = NULL;
	return err;
}
/**
 *	ip6_dst_lookup - perform route lookup on flow
 *	@sk: socket which provides route info
 *	@dst: pointer to dst_entry * for result
 *	@fl6: flow to lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6)
{
	*dst = NULL;
	return ip6_dst_lookup_tail(sk, dst, fl6);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);
/**
 *	ip6_dst_lookup_flow - perform route lookup on flow with ipsec
 *	@sk: socket which provides route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *	@can_sleep: we are in a sleepable context
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns a valid dst pointer on success, or a pointer encoded
 *	error code.
 */
struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
				      const struct in6_addr *final_dst,
				      bool can_sleep)
{
	struct dst_entry *dst = NULL;
	int err;

	err = ip6_dst_lookup_tail(sk, &dst, fl6);
	if (err)
		return ERR_PTR(err);
	if (final_dst)
		fl6->daddr = *final_dst;
	if (can_sleep)
		fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;

	return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
/**
 *	ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
 *	@sk: socket which provides the dst cache and route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *	@can_sleep: we are in a sleepable context
 *
 *	This function performs a route lookup on the given flow with the
 *	possibility of using the cached route in the socket if it is valid.
 *	It will take the socket dst lock when operating on the dst cache.
 *	As a result, this function can only be used in process context.
 *
 *	It returns a valid dst pointer on success, or a pointer encoded
 *	error code.
 */
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
					 const struct in6_addr *final_dst,
					 bool can_sleep)
{
	struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
	int err;

	dst = ip6_sk_dst_check(sk, dst, fl6);

	err = ip6_dst_lookup_tail(sk, &dst, fl6);
	if (err)
		return ERR_PTR(err);
	if (final_dst)
		fl6->daddr = *final_dst;
	if (can_sleep)
		fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;

	return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
static inline int ip6_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
			int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags,
			struct rt6_info *rt)
{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP large send offload by network
	 * device, so create one single skb packet containing complete
	 * udp datagram
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		struct frag_hdr fhdr;

		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);
		if (skb == NULL)
			return err;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_reset_network_header(skb);

		/* initialize protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;

		/* Specify the length of each IPv6 datagram fragment.
		 * It has to be a multiple of 8.
		 */
		skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
					     sizeof(struct frag_hdr)) & ~7;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		ipv6_select_ident(&fhdr, rt);
		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
		__skb_queue_tail(&sk->sk_write_queue, skb);
	}

	return skb_append_datato_frags(sk, skb, getfrag, from,
				       (length - transhdrlen));
}
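/*
 * Note (illustrative): gso_size is clamped to a multiple of 8 because
 * IPv6 fragment offsets are expressed in 8-byte units; with a 1500-byte
 * MTU and a 40-byte header this yields (1500 - 40 - 8) & ~7 = 1448
 * bytes of payload per fragment emitted by the UFO-capable device.
 */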
static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
					       gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}

static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
						gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}
static void ip6_append_data_mtu(unsigned int *mtu,
				int *maxfraglen,
				unsigned int fragheaderlen,
				struct sk_buff *skb,
				struct rt6_info *rt,
				unsigned int orig_mtu)
{
	if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
		if (skb == NULL) {
			/* first fragment, reserve header_len */
			*mtu = orig_mtu - rt->dst.header_len;

		} else {
			/*
			 * this fragment is not first, the headers
			 * space is regarded as data space.
			 */
			*mtu = orig_mtu;
		}
		*maxfraglen = ((*mtu - fragheaderlen) & ~7)
			      + fragheaderlen - sizeof(struct frag_hdr);
	}
}
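/*
 * maxfraglen example (illustrative): with mtu = 1500 and
 * fragheaderlen = 40, maxfraglen = ((1500 - 40) & ~7) + 40 - 8 = 1488;
 * each queued skb may grow to 1488 bytes counted from the network
 * header, leaving room for the 8-byte fragment header that
 * ip6_fragment() will later insert.
 */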
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
	int offset, int len, int odd, struct sk_buff *skb),
	void *from, int length, int transhdrlen,
	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
	struct rt6_info *rt, unsigned int flags, int dontfrag)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_cork *cork;
	struct sk_buff *skb, *skb_prev = NULL;
	unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
	int exthdrlen;
	int dst_exthdrlen;
	int hh_len;
	int copy;
	int err;
	int offset = 0;
	__u8 tx_flags = 0;

	if (flags&MSG_PROBE)
		return 0;
	cork = &inet->cork.base;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (WARN_ON(np->cork.opt))
				return -EINVAL;

			np->cork.opt = kzalloc(opt->tot_len, sk->sk_allocation);
			if (unlikely(np->cork.opt == NULL))
				return -ENOBUFS;

			np->cork.opt->tot_len = opt->tot_len;
			np->cork.opt->opt_flen = opt->opt_flen;
			np->cork.opt->opt_nflen = opt->opt_nflen;

			np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt,
							    sk->sk_allocation);
			if (opt->dst0opt && !np->cork.opt->dst0opt)
				return -ENOBUFS;

			np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt,
							    sk->sk_allocation);
			if (opt->dst1opt && !np->cork.opt->dst1opt)
				return -ENOBUFS;

			np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt,
							   sk->sk_allocation);
			if (opt->hopopt && !np->cork.opt->hopopt)
				return -ENOBUFS;

			np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt,
							    sk->sk_allocation);
			if (opt->srcrt && !np->cork.opt->srcrt)
				return -ENOBUFS;

			/* need source address above miyazawa*/
		}
		dst_hold(&rt->dst);
		cork->dst = &rt->dst;
		inet->cork.fl.u.ip6 = *fl6;
		np->cork.hop_limit = hlimit;
		np->cork.tclass = tclass;
		if (rt->dst.flags & DST_XFRM_TUNNEL)
			mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
			      rt->dst.dev->mtu : dst_mtu(&rt->dst);
		else
			mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
			      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
		if (np->frag_size < mtu) {
			if (np->frag_size)
				mtu = np->frag_size;
		}
		cork->fragsize = mtu;
		if (dst_allfrag(rt->dst.path))
			cork->flags |= IPCORK_ALLFRAG;
		cork->length = 0;
		exthdrlen = (opt ? opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
		dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
	} else {
		rt = (struct rt6_info *)cork->dst;
		fl6 = &inet->cork.fl.u.ip6;
		opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		dst_exthdrlen = 0;
		mtu = cork->fragsize;
	}
	orig_mtu = mtu;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
			(opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
		     sizeof(struct frag_hdr);

	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		if (cork->length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
			ipv6_local_error(sk, EMSGSIZE, fl6, mtu-exthdrlen);
			return -EMSGSIZE;
		}
	}

	/* For UDP, check if TX timestamp is enabled */
	if (sk->sk_type == SOCK_DGRAM)
		sock_tx_timestamp(sk, &tx_flags);

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail of
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	if ((length > mtu) && dontfrag && (sk->sk_protocol == IPPROTO_UDP ||
					   sk->sk_protocol == IPPROTO_RAW)) {
		ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
		return -EMSGSIZE;
	}

	skb = skb_peek_tail(&sk->sk_write_queue);
	cork->length += length;
	if (((length > mtu) ||
	     (skb && skb_has_frags(skb))) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO)) {
		err = ip6_ufo_append_data(sk, getfrag, from, length,
					  hh_len, fragheaderlen,
					  transhdrlen, mtu, flags, rt);
		if (err)
			goto error;
		return 0;
	}

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
alloc_new_skb:
			/* There's no room in the current skb */
			if (skb)
				fraggap = skb->len - maxfraglen;
			else
				fraggap = 0;
			/* update mtu and maxfraglen if necessary */
			if (skb == NULL || skb_prev == NULL)
				ip6_append_data_mtu(&mtu, &maxfraglen,
						    fragheaderlen, skb, rt,
						    orig_mtu);

			skb_prev = skb;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;

			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			alloclen += dst_exthdrlen;

			if (datalen != length + fraggap) {
				/*
				 * this is not the last fragment, the trailer
				 * space is regarded as data space.
				 */
				datalen += rt->dst.trailer_len;
			}

			alloclen += rt->dst.trailer_len;
			fraglen = datalen + fragheaderlen;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
				else {
					/* Only the initial fragment
					 * is time stamped.
					 */
					tx_flags = 0;
				}
			}
			if (skb == NULL)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			/* reserve for fragmentation and ipsec header */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
				    dst_exthdrlen);

			if (sk->sk_type == SOCK_DGRAM)
				skb_shinfo(skb)->tx_flags = tx_flags;

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			data += fragheaderlen;
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;

			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			dst_exthdrlen = 0;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			struct page_frag *pfrag = sk_page_frag(sk);

			err = -ENOMEM;
			if (!sk_page_frag_refill(sk, pfrag))
				goto error;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				err = -EMSGSIZE;
				if (i == MAX_SKB_FRAGS)
					goto error;

				__skb_fill_page_desc(skb, i, pfrag->page,
						     pfrag->offset, 0);
				skb_shinfo(skb)->nr_frags = ++i;
				get_page(pfrag->page);
			}
			copy = min_t(int, copy, pfrag->size - pfrag->offset);
			if (getfrag(from,
				    page_address(pfrag->page) + pfrag->offset,
				    offset, copy, skb->len, skb) < 0)
				goto error_efault;

			pfrag->offset += copy;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error_efault:
	err = -EFAULT;
error:
	cork->length -= length;
	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}
EXPORT_SYMBOL_GPL(ip6_append_data);
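/*
 * Typical corked-send pattern (illustrative sketch, simplified from how
 * datagram protocols drive this API):
 *
 *	err = ip6_append_data(sk, getfrag, msg, len, sizeof(struct udphdr),
 *			      hlimit, tclass, opt, &fl6, rt, flags, dontfrag);
 *	if (err)
 *		ip6_flush_pending_frames(sk);
 *	else if (!(flags & MSG_MORE))
 *		err = ip6_push_pending_frames(sk);
 *
 * The variables shown are placeholders for the caller's own state.
 */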
static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np)
{
	if (np->cork.opt) {
		kfree(np->cork.opt->dst0opt);
		kfree(np->cork.opt->dst1opt);
		kfree(np->cork.opt->hopopt);
		kfree(np->cork.opt->srcrt);
		kfree(np->cork.opt);
		np->cork.opt = NULL;
	}

	if (inet->cork.base.dst) {
		dst_release(inet->cork.base.dst);
		inet->cork.base.dst = NULL;
		inet->cork.base.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
}
int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = np->cork.opt;
	struct rt6_info *rt = (struct rt6_info *)inet->cork.base.dst;
	struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
	unsigned char proto = fl6->flowi6_proto;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Allow local fragmentation. */
	if (np->pmtudisc < IPV6_PMTUDISC_DO)
		skb->local_df = 1;

	*final_dst = fl6->daddr;
	__skb_pull(skb, skb_network_header_len(skb));
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	ip6_flow_hdr(hdr, np->cork.tclass, fl6->flowlabel);
	hdr->hop_limit = np->cork.hop_limit;
	hdr->nexthdr = proto;
	hdr->saddr = fl6->saddr;
	hdr->daddr = *final_dst;

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	skb_dst_set(skb, dst_clone(&rt->dst));
	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
	if (proto == IPPROTO_ICMPV6) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
	}

	err = ip6_local_out(skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			goto error;
	}

out:
	ip6_cork_release(inet, np);
	return err;
error:
	IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	goto out;
}
EXPORT_SYMBOL_GPL(ip6_push_pending_frames);
void ip6_flush_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
		if (skb_dst(skb))
			IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
	}

	ip6_cork_release(inet_sk(sk), inet6_sk(sk));
}
EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);