/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetic in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *				etc.
 *
 *	H. von Brand	:	Added missing #include <linux/string.h>
 *	Imran Patel	:	frag id should be in NBO
 *	Kazunori MIYAZAWA @USAGI
 *			:	add ip6_append_data and related functions
 *				for datagram xmit
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>
#include <linux/mroute6.h>

int __ip6_local_out(struct sk_buff *skb)
{
	int len;

	len = skb->len - sizeof(struct ipv6hdr);
	if (len > IPV6_MAXPLEN)
		len = 0;
	ipv6_hdr(skb)->payload_len = htons(len);

	return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
		       skb_dst(skb)->dev, dst_output);
}

int ip6_local_out(struct sk_buff *skb)
{
	int err;

	err = __ip6_local_out(skb);
	if (likely(err == 1))
		err = dst_output(skb);

	return err;
}
EXPORT_SYMBOL_GPL(ip6_local_out);
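
/*
 * Editorial note (illustrative, not in the original source): the length
 * clamp in __ip6_local_out() above follows the RFC 2675 jumbogram
 * convention. For an ordinary 1280-byte packet, len = 1280 - 40 = 1240
 * and payload_len becomes htons(1240); only if the payload would exceed
 * IPV6_MAXPLEN (65535) is payload_len written as 0, the value that marks
 * a jumbo payload carried in a hop-by-hop option.
 */
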
static int ip6_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct neighbour *neigh;
	struct in6_addr *nexthop;
	int ret;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) &&
		    ((mroute6_socket(dev_net(dev), skb) &&
		     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
		     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
					 &ipv6_hdr(skb)->saddr))) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			   is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
					newskb, NULL, newskb->dev,
					dev_loopback_xmit);

			if (ipv6_hdr(skb)->hop_limit == 0) {
				IP6_INC_STATS(dev_net(dev), idev,
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST,
				skb->len);

		if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
		    IPV6_ADDR_SCOPE_NODELOCAL &&
		    !(dev->flags & IFF_LOOPBACK)) {
			kfree_skb(skb);
			return 0;
		}
	}

	rcu_read_lock_bh();
	nexthop = rt6_nexthop((struct rt6_info *)dst);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		ret = dst_neigh_output(dst, neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(dev_net(dst->dev),
		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}

static int ip6_finish_output(struct sk_buff *skb)
{
	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
	    dst_allfrag(skb_dst(skb)) ||
	    (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
		return ip6_fragment(skb, ip6_finish_output2);
	else
		return ip6_finish_output2(skb);
}
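
/*
 * Illustrative example (editorial addition, not in the original source):
 * with a dst MTU of 1500, a 1600-byte non-GSO skb takes the
 * ip6_fragment() path above, while a large GSO skb does not, since its
 * segments (not the aggregate) are what hit the wire. The
 * IP6CB(skb)->frag_max_size bound is filled in by IPv6 conntrack defrag
 * so that reassembled packets are refragmented no larger than the
 * fragments that originally arrived.
 */
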
int ip6_output(struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;
	struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
	if (unlikely(idev->cnf.disable_ipv6)) {
		IP6_INC_STATS(dev_net(dev), idev,
			      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
		return 0;
	}

	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, dev,
			    ip6_finish_output,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}

/*
 *	xmit an sk_buff (used by TCP, SCTP and DCCP)
 */

int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
	     struct ipv6_txoptions *opt, int tclass)
{
	struct net *net = sock_net(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl6->daddr;
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr;
	u8  proto = fl6->flowi6_proto;
	int seg_len = skb->len;
	int hlimit = -1;
	u32 mtu;

	if (opt) {
		unsigned int head_room;

		/* First: exthdrs may take lots of space (~8K for now)
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			if (skb2 == NULL) {
				IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return -ENOBUFS;
			}
			consume_skb(skb);
			skb = skb2;
			skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	/*
	 *	Fill in the IPv6 header
	 */
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = ip6_dst_hoplimit(dst);

	ip6_flow_hdr(hdr, tclass, fl6->flowlabel);

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	hdr->saddr = fl6->saddr;
	hdr->daddr = *first_hop;

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) {
		IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_OUT, skb->len);
		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
			       dst->dev, dst_output);
	}

	skb->dev = dst->dev;
	ipv6_local_error(sk, EMSGSIZE, fl6, mtu);
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}

EXPORT_SYMBOL(ip6_xmit);
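
/*
 * Usage sketch (editorial addition, not part of this file): stream
 * protocols call ip6_xmit() on an skb whose dst is already attached,
 * roughly as inet6_csk_xmit() does:
 *
 *	struct ipv6_pinfo *np = inet6_sk(sk);
 *	struct flowi6 fl6;
 *
 *	... build fl6 from the socket, attach the cached dst to skb ...
 *	res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
 */
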
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}

static int ip6_forward_proxy_check(struct sk_buff *skb)
{
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	u8 nexthdr = hdr->nexthdr;
	__be16 frag_off;
	int offset;

	if (ipv6_ext_hdr(nexthdr)) {
		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
		if (offset < 0)
			return 0;
	} else
		offset = sizeof(struct ipv6hdr);

	if (nexthdr == IPPROTO_ICMPV6) {
		struct icmp6hdr *icmp6;

		if (!pskb_may_pull(skb, (skb_network_header(skb) +
					 offset + 1 - skb->data)))
			return 0;

		icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);

		switch (icmp6->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			/* For reaction involving unicast neighbor discovery
			 * message destined to the proxied address, pass it to
			 * input function.
			 */
			return 1;
		}
	}

	/*
	 * The proxying router can't forward traffic sent to a link-local
	 * address, so signal the sender and discard the packet. This
	 * behavior is clarified by the MIPv6 specification.
	 */
	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
		dst_link_failure(skb);
		return -1;
	}

	return 0;
}

static inline int ip6_forward_finish(struct sk_buff *skb)
{
	return dst_output(skb);
}

static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu || skb->local_df)
		return false;

	/* ipv6 conntrack defrag sets max_frag_size + local_df */
	if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
		return true;

	if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
		return false;

	return true;
}
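
/*
 * Illustrative cases (editorial addition, not in the original source),
 * for mtu = 1500: a 1400-byte packet returns false (it fits); a
 * 1600-byte packet with local_df unset and no frag_max_size returns
 * true; a GSO packet whose per-segment network-layer length is at most
 * 1500 returns false, because the segments, not the aggregate, are what
 * reach the link.
 */
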
int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct net *net = dev_net(dst->dev);
	u32 mtu;

	if (net->ipv6.devconf_all->forwarding == 0)
		goto error;

	if (skb_warn_if_lro(skb))
		goto drop;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	skb_forward_csum(skb);

	/*
	 *	We DO NOT make any processing on
	 *	RA packets, pushing them to user level AS IS
	 *	without any WARRANTY that application will be able
	 *	to interpret them. The reason is that we
	 *	cannot make anything clever here.
	 *
	 *	We are not end-node, so that if packet contains
	 *	AH/ESP, we cannot make anything.
	 *	Defragmentation also would be a mistake; RA packets
	 *	cannot be fragmented, because there is no warranty
	 *	that different fragments will go along one path. --ANK
	 */
	if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
		if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	/* XXX: idev->cnf.proxy_ndp? */
	if (net->ipv6.devconf_all->proxy_ndp &&
	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
		int proxied = ip6_forward_proxy_check(skb);
		if (proxied > 0)
			return ip6_input(skb);
		else if (proxied < 0) {
			IP6_INC_STATS(net, ip6_dst_idev(dst),
				      IPSTATS_MIB_INDISCARDS);
			goto drop;
		}
	}

	if (!xfrm6_route_forward(skb)) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb_dst(skb);

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	   send redirects to source routed frames.
	   We don't send redirects to frames decapsulated from IPsec.
	 */
	if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
		struct in6_addr *target = NULL;
		struct inet_peer *peer;
		struct rt6_info *rt;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if (rt->rt6i_flags & RTF_GATEWAY)
			target = &rt->rt6i_gateway;
		else
			target = &hdr->daddr;

		peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (inet_peer_xrlim_allow(peer, 1*HZ))
			ndisc_send_redirect(skb, target);
		if (peer)
			inet_putpeer(peer);
	} else {
		int addrtype = ipv6_addr_type(&hdr->saddr);

		/* This check is security critical. */
		if (addrtype == IPV6_ADDR_ANY ||
		    addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
			goto error;
		if (addrtype & IPV6_ADDR_LINKLOCAL) {
			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
				    ICMPV6_NOT_NEIGHBOUR, 0);
			goto error;
		}
	}

	mtu = dst_mtu(dst);
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;

	if (ip6_pkt_too_big(skb, mtu)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = ipv6_hdr(skb);

	/* Mangling hops number delayed to point after skb COW */

	hdr->hop_limit--;

	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
	IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}

static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	skb_dst_drop(to);
	skb_dst_set(to, dst_clone(skb_dst(from)));
	to->dev = from->dev;
	to->mark = from->mark;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
	to->nf_trace = from->nf_trace;
#endif
	skb_copy_secmark(to, from);
}

int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	int hroom, troom;
	__be32 frag_id = 0;
	int ptr, offset = 0, err = 0;
	u8 *prevhdr, nexthdr = 0;
	struct net *net = dev_net(skb_dst(skb)->dev);

	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = ip6_skb_dst_mtu(skb);

	/* We must not fragment if the socket is set to force MTU discovery
	 * or if the skb is not generated by a local socket.
	 */
	if (unlikely(!skb->local_df && skb->len > mtu) ||
		     (IP6CB(skb)->frag_max_size &&
		      IP6CB(skb)->frag_max_size > mtu)) {
		if (skb->sk && dst_allfrag(skb_dst(skb)))
			sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);

		skb->dev = skb_dst(skb)->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	mtu -= hlen + sizeof(struct frag_hdr);

	if (skb_has_frag_list(skb)) {
		int first_len = skb_pagelen(skb);
		struct sk_buff *frag2;

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		/* BUILD HEADER */

		*prevhdr = NEXTHDR_FRAGMENT;
		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		__skb_pull(skb, hlen);
		fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
		__skb_push(skb, hlen);
		skb_reset_network_header(skb);
		memcpy(skb_network_header(skb), tmp_hdr, hlen);

		ipv6_select_ident(fh, rt);
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		ipv6_hdr(skb)->payload_len = htons(first_len -
						   sizeof(struct ipv6hdr));

		dst_hold(&rt->dst);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), tmp_hdr,
				       hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next != NULL)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				ipv6_hdr(frag)->payload_len =
						htons(frag->len -
						      sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(skb);
			if (!err)
				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
					      IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
				      IPSTATS_MIB_FRAGOKS);
			ip6_rt_put(rt);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}

		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
			      IPSTATS_MIB_FRAGFAILS);
		ip6_rt_put(rt);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	if ((skb->ip_summed == CHECKSUM_PARTIAL) &&
	    skb_checksum_help(skb))
		goto fail;

	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;
	hroom = LL_RESERVED_SPACE(rt->dst.dev);
	troom = rt->dst.dev->needed_tailroom;

	/*
	 *	Keep copying data until we run out.
	 */
	while (left > 0)	{
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)	{
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
				      hroom + troom, GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, hroom);
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		skb_reset_network_header(frag);
		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
		frag->transport_header = (frag->network_header + hlen +
					  sizeof(struct frag_hdr));

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		if (!frag_id) {
			ipv6_select_ident(fh, rt);
			frag_id = fh->identification;
		} else
			fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len))
			BUG();
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		ipv6_hdr(frag)->payload_len = htons(frag->len -
						    sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(frag);
		if (err)
			goto fail;

		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGOKS);
	consume_skb(skb);
	return err;

fail:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}
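
/*
 * Worked example (editorial addition, not in the original source): for
 * a 4000-byte payload behind a bare 40-byte IPv6 header (hlen = 40) on
 * a 1500-byte MTU link, the per-fragment budget computed above is
 * 1500 - 40 - 8 = 1452, rounded down to 1448 (a multiple of 8) for all
 * but the last fragment: 1448 + 1448 + 1104 = 4000, and each wire
 * fragment is at most 40 + 8 + 1448 = 1496 bytes.
 */
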
static inline int ip6_rt_check(const struct rt6key *rt_key,
			       const struct in6_addr *fl_addr,
			       const struct in6_addr *addr_cache)
{
	return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
		(addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache));
}
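
/*
 * Illustrative reading (editorial addition, not in the original
 * source): a non-zero result means "the cached route may be stale".
 * A /128 host route whose key equals fl_addr returns 0 (still valid);
 * a /64 route returns 0 only when addr_cache matches fl_addr, since
 * the prefix alone cannot vouch for a new destination address.
 */
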
static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
					  struct dst_entry *dst,
					  const struct flowi6 *fl6)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct rt6_info *rt;

	if (!dst)
		goto out;

	if (dst->ops->family != AF_INET6) {
		dst_release(dst);
		return NULL;
	}

	rt = (struct rt6_info *)dst;
	/* Yes, checking route validity in the not-connected
	 * case is not very simple. Take into account,
	 * that we do not support routing by source, TOS,
	 * and MSG_DONTROUTE		--ANK (980726)
	 *
	 * 1. ip6_rt_check(): If route was host route,
	 *    check that cached destination is current.
	 *    If it is network route, we still may
	 *    check its validity using saved pointer
	 *    to the last used address: daddr_cache.
	 *    We do not want to save whole address now,
	 *    (because main consumer of this service
	 *    is tcp, which does not have this problem),
	 *    so that the last trick works only on connected
	 *    sockets.
	 * 2. oif also should be the same.
	 */
	if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
	    ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
#endif
	    (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
		dst_release(dst);
		dst = NULL;
	}

out:
	return dst;
}

static int ip6_dst_lookup_tail(struct sock *sk,
			       struct dst_entry **dst, struct flowi6 *fl6)
{
	struct net *net = sock_net(sk);
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	struct neighbour *n;
	struct rt6_info *rt;
#endif
	int err;

	if (*dst == NULL)
		*dst = ip6_route_output(net, sk, fl6);

	if ((err = (*dst)->error))
		goto out_err_release;

	if (ipv6_addr_any(&fl6->saddr)) {
		struct rt6_info *rt = (struct rt6_info *) *dst;
		err = ip6_route_get_saddr(net, rt, &fl6->daddr,
					  sk ? inet6_sk(sk)->srcprefs : 0,
					  &fl6->saddr);
		if (err)
			goto out_err_release;
	}

#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	/* Here if the dst entry we've looked up has a neighbour entry
	 * that is in the INCOMPLETE state and the src address from the
	 * flow is marked as OPTIMISTIC, we release the found dst entry
	 * and replace it instead with the dst entry of the nexthop router
	 */
	rt = (struct rt6_info *) *dst;
	rcu_read_lock_bh();
	n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt));
	err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
	rcu_read_unlock_bh();

	if (err) {
		struct inet6_ifaddr *ifp;
		struct flowi6 fl_gw6;
		int redirect;

		ifp = ipv6_get_ifaddr(net, &fl6->saddr,
				      (*dst)->dev, 1);

		redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
		if (ifp)
			in6_ifa_put(ifp);

		if (redirect) {
			/* We need to get the dst entry for the
			 * default router instead
			 */
			dst_release(*dst);
			memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
			memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
			*dst = ip6_route_output(net, sk, &fl_gw6);
			if ((err = (*dst)->error))
				goto out_err_release;
		}
	}
#endif

	return 0;

out_err_release:
	if (err == -ENETUNREACH)
		IP6_INC_STATS_BH(net, NULL, IPSTATS_MIB_OUTNOROUTES);
	dst_release(*dst);
	*dst = NULL;
	return err;
}

/**
 *	ip6_dst_lookup - perform route lookup on flow
 *	@sk: socket which provides route info
 *	@dst: pointer to dst_entry * for result
 *	@fl6: flow to lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6)
{
	*dst = NULL;
	return ip6_dst_lookup_tail(sk, dst, fl6);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);

/**
 *	ip6_dst_lookup_flow - perform route lookup on flow with ipsec
 *	@sk: socket which provides route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *	@can_sleep: we are in a sleepable context
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns a valid dst pointer on success, or a pointer encoded
 *	error code.
 */
struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
				      const struct in6_addr *final_dst,
				      bool can_sleep)
{
	struct dst_entry *dst = NULL;
	int err;

	err = ip6_dst_lookup_tail(sk, &dst, fl6);
	if (err)
		return ERR_PTR(err);
	if (final_dst)
		fl6->daddr = *final_dst;
	if (can_sleep)
		fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;

	return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
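
/*
 * Usage sketch (editorial addition, not part of this file): callers
 * such as rawv6_sendmsg() resolve the route and test for a pointer
 * encoded error:
 *
 *	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
 *	if (IS_ERR(dst)) {
 *		err = PTR_ERR(dst);
 *		goto out;
 *	}
 */
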
/**
 *	ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
 *	@sk: socket which provides the dst cache and route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *	@can_sleep: we are in a sleepable context
 *
 *	This function performs a route lookup on the given flow with the
 *	possibility of using the cached route in the socket if it is valid.
 *	It will take the socket dst lock when operating on the dst cache.
 *	As a result, this function can only be used in process context.
 *
 *	It returns a valid dst pointer on success, or a pointer encoded
 *	error code.
 */
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
					 const struct in6_addr *final_dst,
					 bool can_sleep)
{
	struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
	int err;

	dst = ip6_sk_dst_check(sk, dst, fl6);

	err = ip6_dst_lookup_tail(sk, &dst, fl6);
	if (err)
		return ERR_PTR(err);
	if (final_dst)
		fl6->daddr = *final_dst;
	if (can_sleep)
		fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;

	return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);

static inline int ip6_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
			int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags,
			struct rt6_info *rt)

{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP large send offload by network
	 * device, so create one single skb packet containing complete
	 * udp datagram
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		struct frag_hdr fhdr;

		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);
		if (skb == NULL)
			return err;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_reset_network_header(skb);

		/* initialize protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;

		/* Specify the length of each IPv6 datagram fragment.
		 * It has to be a multiple of 8.
		 */
		skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
					     sizeof(struct frag_hdr)) & ~7;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		ipv6_select_ident(&fhdr, rt);
		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
		__skb_queue_tail(&sk->sk_write_queue, skb);
	}

	return skb_append_datato_frags(sk, skb, getfrag, from,
				       (length - transhdrlen));
}
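
/*
 * Illustrative arithmetic (editorial addition, not in the original
 * source): with mtu = 1500 and fragheaderlen = 40, gso_size above is
 * (1500 - 40 - 8) & ~7 = 1448, so each fragment the device emits
 * carries 1448 bytes of UDP payload after the 8-byte fragment header
 * and stays within the MTU.
 */
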
static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
					       gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}

static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
						gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}

static void ip6_append_data_mtu(unsigned int *mtu,
				unsigned int *maxfraglen,
				unsigned int fragheaderlen,
				struct sk_buff *skb,
				struct rt6_info *rt,
				unsigned int orig_mtu)
{
	if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
		if (skb == NULL) {
			/* first fragment, reserve header_len */
			*mtu = orig_mtu - rt->dst.header_len;

		} else {
			/*
			 * this fragment is not first, the headers
			 * space is regarded as data space.
			 */
			*mtu = orig_mtu;
		}
		*maxfraglen = ((*mtu - fragheaderlen) & ~7)
			      + fragheaderlen - sizeof(struct frag_hdr);
	}
}
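
/*
 * Illustrative arithmetic (editorial addition, not in the original
 * source): with *mtu = 1500 and fragheaderlen = 40 (bare IPv6 header),
 * *maxfraglen = ((1500 - 40) & ~7) + 40 - 8 = 1488, i.e. 40 bytes of
 * headers plus 1448 bytes of payload; once the 8-byte fragment header
 * is pushed, the wire packet is 1496 <= 1500 and the payload stays a
 * multiple of 8.
 */
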
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
	int offset, int len, int odd, struct sk_buff *skb),
	void *from, int length, int transhdrlen,
	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
	struct rt6_info *rt, unsigned int flags, int dontfrag)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_cork *cork;
	struct sk_buff *skb, *skb_prev = NULL;
	unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
	int exthdrlen;
	int dst_exthdrlen;
	int hh_len;
	int copy;
	int err;
	int offset = 0;
	__u8 tx_flags = 0;

	if (flags&MSG_PROBE)
		return 0;
	cork = &inet->cork.base;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (WARN_ON(np->cork.opt))
				return -EINVAL;

			np->cork.opt = kzalloc(opt->tot_len, sk->sk_allocation);
			if (unlikely(np->cork.opt == NULL))
				return -ENOBUFS;

			np->cork.opt->tot_len = opt->tot_len;
			np->cork.opt->opt_flen = opt->opt_flen;
			np->cork.opt->opt_nflen = opt->opt_nflen;

			np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt,
							    sk->sk_allocation);
			if (opt->dst0opt && !np->cork.opt->dst0opt)
				return -ENOBUFS;

			np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt,
							    sk->sk_allocation);
			if (opt->dst1opt && !np->cork.opt->dst1opt)
				return -ENOBUFS;

			np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt,
							   sk->sk_allocation);
			if (opt->hopopt && !np->cork.opt->hopopt)
				return -ENOBUFS;

			np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt,
							    sk->sk_allocation);
			if (opt->srcrt && !np->cork.opt->srcrt)
				return -ENOBUFS;

			/* need source address above miyazawa*/
		}
		dst_hold(&rt->dst);
		cork->dst = &rt->dst;
		inet->cork.fl.u.ip6 = *fl6;
		np->cork.hop_limit = hlimit;
		np->cork.tclass = tclass;
		if (rt->dst.flags & DST_XFRM_TUNNEL)
			mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
			      rt->dst.dev->mtu : dst_mtu(&rt->dst);
		else
			mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
			      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
		if (np->frag_size < mtu) {
			if (np->frag_size)
				mtu = np->frag_size;
		}
		cork->fragsize = mtu;
		if (dst_allfrag(rt->dst.path))
			cork->flags |= IPCORK_ALLFRAG;
		cork->length = 0;
		exthdrlen = (opt ? opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
		dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
	} else {
		rt = (struct rt6_info *)cork->dst;
		fl6 = &inet->cork.fl.u.ip6;
		opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		dst_exthdrlen = 0;
		mtu = cork->fragsize;
	}
	orig_mtu = mtu;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
			(opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);

	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		if (cork->length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
			ipv6_local_error(sk, EMSGSIZE, fl6, mtu-exthdrlen);
			return -EMSGSIZE;
		}
	}

	/* For UDP, check if TX timestamp is enabled */
	if (sk->sk_type == SOCK_DGRAM)
		sock_tx_timestamp(sk, &tx_flags);

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail of
	 * the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	if ((length > mtu) && dontfrag && (sk->sk_protocol == IPPROTO_UDP ||
					   sk->sk_protocol == IPPROTO_RAW)) {
		ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
		return -EMSGSIZE;
	}

	skb = skb_peek_tail(&sk->sk_write_queue);
	cork->length += length;
	if (((length > mtu) ||
	     (skb && skb_has_frags(skb))) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO)) {
		err = ip6_ufo_append_data(sk, getfrag, from, length,
					  hh_len, fragheaderlen,
					  transhdrlen, mtu, flags, rt);
		if (err)
			goto error;
		return 0;
	}

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
alloc_new_skb:
			/* There's no room in the current skb */
			if (skb)
				fraggap = skb->len - maxfraglen;
			else
				fraggap = 0;
			/* update mtu and maxfraglen if necessary */
			if (skb == NULL || skb_prev == NULL)
				ip6_append_data_mtu(&mtu, &maxfraglen,
						    fragheaderlen, skb, rt,
						    orig_mtu);

			skb_prev = skb;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;

			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			alloclen += dst_exthdrlen;

			if (datalen != length + fraggap) {
				/*
				 * this is not the last fragment, the trailer
				 * space is regarded as data space.
				 */
				datalen += rt->dst.trailer_len;
			}

			alloclen += rt->dst.trailer_len;
			fraglen = datalen + fragheaderlen;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
				else {
					/* Only the initial fragment
					 * is time stamped.
					 */
					tx_flags = 0;
				}
			}
			if (skb == NULL)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			/* reserve for fragmentation and ipsec header */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
				    dst_exthdrlen);

			if (sk->sk_type == SOCK_DGRAM)
				skb_shinfo(skb)->tx_flags = tx_flags;

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			data += fragheaderlen;
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;

			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			dst_exthdrlen = 0;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			struct page_frag *pfrag = sk_page_frag(sk);

			err = -ENOMEM;
			if (!sk_page_frag_refill(sk, pfrag))
				goto error;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				err = -EMSGSIZE;
				if (i == MAX_SKB_FRAGS)
					goto error;

				__skb_fill_page_desc(skb, i, pfrag->page,
						     pfrag->offset, 0);
				skb_shinfo(skb)->nr_frags = ++i;
				get_page(pfrag->page);
			}
			copy = min_t(int, copy, pfrag->size - pfrag->offset);
			if (getfrag(from,
				    page_address(pfrag->page) + pfrag->offset,
				    offset, copy, skb->len, skb) < 0)
				goto error_efault;

			pfrag->offset += copy;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error_efault:
	err = -EFAULT;
error:
	cork->length -= length;
	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}
EXPORT_SYMBOL_GPL(ip6_append_data);
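
/*
 * Usage sketch (editorial addition, not part of this file): datagram
 * sockets cork data with ip6_append_data() and flush it with
 * ip6_push_pending_frames(), in the style of udpv6_sendmsg():
 *
 *	err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen,
 *			      sizeof(struct udphdr), hlimit, tclass, opt,
 *			      &fl6, (struct rt6_info *)dst,
 *			      corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags,
 *			      dontfrag);
 *	if (err)
 *		udp_v6_flush_pending_frames(sk);
 *	else if (!corkreq)
 *		err = udp_v6_push_pending_frames(sk);
 */
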
static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np)
{
	if (np->cork.opt) {
		kfree(np->cork.opt->dst0opt);
		kfree(np->cork.opt->dst1opt);
		kfree(np->cork.opt->hopopt);
		kfree(np->cork.opt->srcrt);
		kfree(np->cork.opt);
		np->cork.opt = NULL;
	}

	if (inet->cork.base.dst) {
		dst_release(inet->cork.base.dst);
		inet->cork.base.dst = NULL;
		inet->cork.base.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
}

int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = np->cork.opt;
	struct rt6_info *rt = (struct rt6_info *)inet->cork.base.dst;
	struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
	unsigned char proto = fl6->flowi6_proto;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Allow local fragmentation. */
	if (np->pmtudisc < IPV6_PMTUDISC_DO)
		skb->local_df = 1;

	*final_dst = fl6->daddr;
	__skb_pull(skb, skb_network_header_len(skb));
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	ip6_flow_hdr(hdr, np->cork.tclass, fl6->flowlabel);
	hdr->hop_limit = np->cork.hop_limit;
	hdr->nexthdr = proto;
	hdr->saddr = fl6->saddr;
	hdr->daddr = *final_dst;

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	skb_dst_set(skb, dst_clone(&rt->dst));
	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
	if (proto == IPPROTO_ICMPV6) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
	}

	err = ip6_local_out(skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			goto error;
	}

out:
	ip6_cork_release(inet, np);
	return err;
error:
	IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	goto out;
}
EXPORT_SYMBOL_GPL(ip6_push_pending_frames);

void ip6_flush_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
		if (skb_dst(skb))
			IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
	}

	ip6_cork_release(inet_sk(sk), inet6_sk(sk));
}
EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);