2 * Internet Control Message Protocol (ICMPv6)
3 * Linux INET6 implementation
6 * Pedro Roque <roque@di.fc.ul.pt>
8 * Based on net/ipv4/icmp.c
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
21 * Andi Kleen : exception handling
22 * Andi Kleen add rate limits. never reply to an icmp.
23 * add more length checks and other fixes.
24 * yoshfuji : ensure to send parameter problem for
26 * YOSHIFUJI Hideaki @USAGI: added sysctl for icmp rate limit.
28 * YOSHIFUJI Hideaki @USAGI: Per-interface statistics support
29 * Kazunori MIYAZAWA @USAGI: change output process to use ip6_append_data
32 #define pr_fmt(fmt) "IPv6: " fmt
34 #include <linux/module.h>
35 #include <linux/errno.h>
36 #include <linux/types.h>
37 #include <linux/socket.h>
39 #include <linux/kernel.h>
40 #include <linux/sockios.h>
41 #include <linux/net.h>
42 #include <linux/skbuff.h>
43 #include <linux/init.h>
44 #include <linux/netfilter.h>
45 #include <linux/slab.h>
48 #include <linux/sysctl.h>
51 #include <linux/inet.h>
52 #include <linux/netdevice.h>
53 #include <linux/icmpv6.h>
59 #include <net/ip6_checksum.h>
61 #include <net/protocol.h>
63 #include <net/rawv6.h>
64 #include <net/transp_v6.h>
65 #include <net/ip6_route.h>
66 #include <net/addrconf.h>
69 #include <net/inet_common.h>
71 #include <asm/uaccess.h>
74 * The ICMP socket(s). This is the most convenient way to flow control
75 * our ICMP output as well as maintain a clean interface throughout
76 * all layers. All Socketless IP sends will soon be gone.
78 * On SMP we have one ICMP socket per-cpu.
/* Return this CPU's ICMPv6 control socket for the given netns.
 * One raw socket exists per CPU (see icmpv6_sk_init below), so the
 * caller must hold the lock taken by icmpv6_xmit_lock() / run with
 * preemption disabled while using it. */
80 static inline struct sock *icmpv6_sk(struct net *net)
82 return net->ipv6.icmp_sk[smp_processor_id()];
/* err_handler for IPPROTO_ICMPV6: invoked when an ICMPv6 error arrives
 * in response to ICMPv6 traffic we sent (e.g. an echo request).
 * Updates PMTU / redirect state, then forwards errors about echo
 * requests to the ping socket layer. */
85 static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
86 u8 type, u8 code, int offset, __be32 info)
88 /* icmpv6_notify checks 8 bytes can be pulled, icmp6hdr is 8 bytes */
89 struct icmp6hdr *icmp6 = (struct icmp6hdr *) (skb->data + offset);
90 struct net *net = dev_net(skb->dev);
92 if (type == ICMPV6_PKT_TOOBIG)
93 ip6_update_pmtu(skb, net, info, 0, 0);
94 else if (type == NDISC_REDIRECT)
95 ip6_redirect(skb, net, skb->dev->ifindex, 0);
/* Informational types carry no embedded packet; only errors about our
 * own echo requests are passed on to ping_err(). */
97 if (!(type & ICMPV6_INFOMSG_MASK))
98 if (icmp6->icmp6_type == ICMPV6_ECHO_REQUEST)
99 ping_err(skb, offset, info);
102 static int icmpv6_rcv(struct sk_buff *skb);
/* inet6 protocol hooks for IPPROTO_ICMPV6; registered in icmpv6_init().
 * NOPOLICY/FINAL: xfrm policy checks are done inside icmpv6_rcv itself. */
104 static const struct inet6_protocol icmpv6_protocol = {
105 .handler = icmpv6_rcv,
106 .err_handler = icmpv6_err,
107 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
/* Acquire this CPU's ICMPv6 socket. Uses trylock because the transmit
 * path can re-enter (dst_link_failure() from a tunnel while already
 * sending ICMP); on contention the caller must drop the packet.
 * NOTE(review): lines are elided in this listing; the lock/return
 * sequence between these lines is not visible here. */
110 static __inline__ struct sock *icmpv6_xmit_lock(struct net *net)
117 if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
118 /* This can happen if the output path (f.e. SIT or
119 * ip6ip6 tunnel) signals dst_link_failure() for an
120 * outgoing ICMP6 packet.
/* Release the per-CPU ICMPv6 socket taken by icmpv6_xmit_lock(). */
128 static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
130 spin_unlock_bh(&sk->sk_lock.slock);
134 * Figure out, may we reply to this packet with icmp error.
136 * We do not reply, if:
137 * - it was icmp error message.
138 * - it is truncated, so that it is known, that protocol is ICMPV6
139 * (i.e. in the middle of some exthdr)
/* Return true if we must NOT send an ICMPv6 error about this packet:
 * either it is itself an ICMPv6 error message, or it is truncated
 * inside the extension-header chain so its protocol cannot be trusted. */
144 static bool is_ineligible(const struct sk_buff *skb)
146 int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data;
147 int len = skb->len - ptr;
148 __u8 nexthdr = ipv6_hdr(skb)->nexthdr;
/* Walk the extension headers to the upper-layer protocol. */
154 ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr, &frag_off);
157 if (nexthdr == IPPROTO_ICMPV6) {
159 tp = skb_header_pointer(skb,
160 ptr+offsetof(struct icmp6hdr, icmp6_type),
161 sizeof(_type), &_type);
/* An ICMPv6 error (info bit clear) makes the packet ineligible. */
163 !(*tp & ICMPV6_INFOMSG_MASK))
170 * Check the ICMP output rate limit
/* Check the ICMPv6 output rate limit for this (type, destination) pair.
 * Returns true if sending is allowed. Informational messages and
 * PKT_TOOBIG (PMTU discovery) are never limited; loopback is exempt;
 * everything else is token-bucket limited per destination peer. */
172 static inline bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
175 struct dst_entry *dst;
176 struct net *net = sock_net(sk);
179 /* Informational messages are not limited. */
180 if (type & ICMPV6_INFOMSG_MASK)
183 /* Do not limit pmtu discovery, it would break it. */
184 if (type == ICMPV6_PKT_TOOBIG)
188 * Look up the output route.
189 * XXX: perhaps the expire for routing entries cloned by
190 * this lookup should be more aggressive (not longer than timeout).
192 dst = ip6_route_output(net, sk, fl6);
194 IP6_INC_STATS(net, ip6_dst_idev(dst),
195 IPSTATS_MIB_OUTNOROUTES);
196 } else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) {
199 struct rt6_info *rt = (struct rt6_info *)dst;
200 int tmo = net->ipv6.sysctl.icmpv6_time;
201 struct inet_peer *peer;
203 /* Give more bandwidth to wider prefixes. */
204 if (rt->rt6i_dst.plen < 128)
205 tmo >>= ((128 - rt->rt6i_dst.plen)>>5);
/* Per-destination token bucket kept in the inet_peer cache. */
207 peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
208 res = inet_peer_xrlim_allow(peer, tmo);
217 * an inline helper for the "simple" if statement below
218 * checks if parameter problem report is caused by an
219 * unrecognized IPv6 option that has the Option Type
220 * highest-order two bits set to 10
/* Return true if the parameter-problem report is caused by an
 * unrecognized IPv6 option whose Option Type has its highest-order
 * two bits set to 10 (i.e. "send ICMP even to multicast"). */
223 static bool opt_unrec(struct sk_buff *skb, __u32 offset)
227 offset += skb_network_offset(skb);
228 op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval);
/* 0xC0 masks the two high-order bits of the Option Type octet. */
231 return (*op & 0xC0) == 0x80;
/* Finalize and transmit the ICMPv6 message queued on sk's write queue:
 * copy the prepared header in, compute the ICMPv6 checksum over either
 * the single skb or the whole queue, and push the pending frames.
 * @thdr: prepared ICMPv6 header (checksum field is recomputed here).
 * @len:  ICMPv6 payload length used for the pseudo-header checksum. */
234 int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
235 struct icmp6hdr *thdr, int len)
238 struct icmp6hdr *icmp6h;
241 if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
244 icmp6h = icmp6_hdr(skb);
245 memcpy(icmp6h, thdr, sizeof(struct icmp6hdr));
246 icmp6h->icmp6_cksum = 0;
/* Single-skb fast path: checksum the header into the existing csum. */
248 if (skb_queue_len(&sk->sk_write_queue) == 1) {
249 skb->csum = csum_partial(icmp6h,
250 sizeof(struct icmp6hdr), skb->csum);
251 icmp6h->icmp6_cksum = csum_ipv6_magic(&fl6->saddr,
253 len, fl6->flowi6_proto,
/* Multi-skb path: fold every queued fragment's csum together first. */
258 skb_queue_walk(&sk->sk_write_queue, skb) {
259 tmp_csum = csum_add(tmp_csum, skb->csum);
262 tmp_csum = csum_partial(icmp6h,
263 sizeof(struct icmp6hdr), tmp_csum);
264 icmp6h->icmp6_cksum = csum_ipv6_magic(&fl6->saddr,
266 len, fl6->flowi6_proto,
269 ip6_push_pending_frames(sk);
/* ip6_append_data() getfrag callback: copy a slice of the offending
 * packet (msg->skb) into the outgoing ICMPv6 message while folding its
 * checksum into skb->csum. For error messages, also attach conntrack
 * state from the original skb. */
280 static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
282 struct icmpv6_msg *msg = (struct icmpv6_msg *) from;
283 struct sk_buff *org_skb = msg->skb;
286 csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset,
288 skb->csum = csum_block_add(skb->csum, csum, odd);
289 if (!(msg->type & ICMPV6_INFOMSG_MASK))
290 nf_ct_attach(skb, org_skb);
294 #if IS_ENABLED(CONFIG_IPV6_MIP6)
/* Mobile IPv6: if the packet carries a Home Address Option, restore the
 * home address into the saddr field so the ICMP error is addressed per
 * RFC 6275 before being sent back. */
295 static void mip6_addr_swap(struct sk_buff *skb)
297 struct ipv6hdr *iph = ipv6_hdr(skb);
298 struct inet6_skb_parm *opt = IP6CB(skb);
299 struct ipv6_destopt_hao *hao;
304 off = ipv6_find_tlv(skb, opt->dsthao, IPV6_TLV_HAO);
305 if (likely(off >= 0)) {
306 hao = (struct ipv6_destopt_hao *)
307 (skb_network_header(skb) + off);
/* NOTE(review): elided lines here presumably swap saddr and hao->addr;
 * only the saddr assignment is visible in this listing. */
309 iph->saddr = hao->addr;
/* Stub when Mobile IPv6 support is compiled out. */
315 static inline void mip6_addr_swap(struct sk_buff *skb) {}
/* Look up (and xfrm-transform) the route for an outgoing ICMPv6 error.
 * Refuses anycast sources, and on -EPERM from the first xfrm lookup
 * retries with a reverse-decoded flow (XFRM_LOOKUP_ICMP) so errors can
 * traverse IPsec tunnels. Returns a dst or ERR_PTR(). */
318 static struct dst_entry *icmpv6_route_lookup(struct net *net,
323 struct dst_entry *dst, *dst2;
327 err = ip6_dst_lookup(sk, &dst, fl6);
332 * We won't send icmp if the destination is known
335 if (((struct rt6_info *)dst)->rt6i_flags & RTF_ANYCAST) {
336 LIMIT_NETDEBUG(KERN_DEBUG "icmp6_send: acast source\n");
338 return ERR_PTR(-EINVAL);
341 /* No need to clone since we're just using its address. */
344 dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), sk, 0);
/* -EPERM: policy blocked the forward flow; retry via the reverse flow. */
349 if (PTR_ERR(dst) == -EPERM)
355 err = xfrm_decode_session_reverse(skb, flowi6_to_flowi(&fl2), AF_INET6);
357 goto relookup_failed;
359 err = ip6_dst_lookup(sk, &dst2, &fl2);
361 goto relookup_failed;
363 dst2 = xfrm_lookup(net, dst2, flowi6_to_flowi(&fl2), sk, XFRM_LOOKUP_ICMP);
373 goto relookup_failed;
383 * Send an ICMP message in response to a packet in error
/* Send an ICMPv6 error message (type/code/info) in response to the
 * packet in error (skb). Enforces the RFC 4443 / RFC 2463 eligibility
 * rules (no errors about errors, about multicast-sourced or
 * unspecified-sourced packets, etc.), rate-limits the reply, and
 * builds it with ip6_append_data()/icmpv6_push_pending_frames(). */
385 static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
387 struct net *net = dev_net(skb->dev);
388 struct inet6_dev *idev = NULL;
389 struct ipv6hdr *hdr = ipv6_hdr(skb);
391 struct ipv6_pinfo *np;
392 const struct in6_addr *saddr = NULL;
393 struct dst_entry *dst;
394 struct icmp6hdr tmp_hdr;
396 struct icmpv6_msg msg;
/* Sanity check: the IPv6 header must lie entirely within the skb. */
403 if ((u8 *)hdr < skb->head ||
404 (skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb))
408 * Make sure we respect the rules
409 * i.e. RFC 1885 2.4(e)
410 * Rule (e.1) is enforced by not using icmp6_send
411 * in any code that processes icmp errors.
413 addr_type = ipv6_addr_type(&hdr->daddr);
415 if (ipv6_chk_addr(net, &hdr->daddr, skb->dev, 0))
/* Multicast / non-host-addressed packets: only PKT_TOOBIG or a
 * parameter problem about an "unrecognized option" (high bits 10)
 * may be answered. */
422 if ((addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST)) {
423 if (type != ICMPV6_PKT_TOOBIG &&
424 !(type == ICMPV6_PARAMPROB &&
425 code == ICMPV6_UNK_OPTION &&
426 (opt_unrec(skb, info))))
432 addr_type = ipv6_addr_type(&hdr->saddr);
438 if (__ipv6_addr_needs_scope_id(addr_type))
439 iif = skb->dev->ifindex;
442 * Must not send error if the source does not uniquely
443 * identify a single node (RFC2463 Section 2.4).
444 * We check unspecified / multicast addresses here,
445 * and anycast addresses will be checked later.
447 if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) {
448 LIMIT_NETDEBUG(KERN_DEBUG "icmp6_send: addr_any/mcast source\n");
453 * Never answer to a ICMP packet.
455 if (is_ineligible(skb)) {
456 LIMIT_NETDEBUG(KERN_DEBUG "icmp6_send: no reply to icmp error\n");
/* Build the flow back toward the offending packet's source. */
462 memset(&fl6, 0, sizeof(fl6));
463 fl6.flowi6_proto = IPPROTO_ICMPV6;
464 fl6.daddr = hdr->saddr;
467 fl6.flowi6_oif = iif;
468 fl6.fl6_icmp_type = type;
469 fl6.fl6_icmp_code = code;
470 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
472 sk = icmpv6_xmit_lock(net);
477 if (!icmpv6_xrlim_allow(sk, type, &fl6))
480 tmp_hdr.icmp6_type = type;
481 tmp_hdr.icmp6_code = code;
482 tmp_hdr.icmp6_cksum = 0;
483 tmp_hdr.icmp6_pointer = htonl(info);
485 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
486 fl6.flowi6_oif = np->mcast_oif;
487 else if (!fl6.flowi6_oif)
488 fl6.flowi6_oif = np->ucast_oif;
490 dst = icmpv6_route_lookup(net, skb, sk, &fl6);
/* Pick the hop limit: socket setting first, else from the route. */
494 if (ipv6_addr_is_multicast(&fl6.daddr))
495 hlimit = np->mcast_hops;
497 hlimit = np->hop_limit;
499 hlimit = ip6_dst_hoplimit(dst);
502 msg.offset = skb_network_offset(skb);
/* Quote as much of the offending packet as fits in the IPv6 minimum
 * MTU (RFC 4443 requires errors to fit in 1280 bytes). */
505 len = skb->len - msg.offset;
506 len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) -sizeof(struct icmp6hdr));
508 LIMIT_NETDEBUG(KERN_DEBUG "icmp: len problem\n");
509 goto out_dst_release;
513 idev = __in6_dev_get(skb->dev);
515 err = ip6_append_data(sk, icmpv6_getfrag, &msg,
516 len + sizeof(struct icmp6hdr),
517 sizeof(struct icmp6hdr), hlimit,
518 np->tclass, NULL, &fl6, (struct rt6_info *)dst,
519 MSG_DONTWAIT, np->dontfrag);
/* On append failure, count the error and discard the queued frames. */
521 ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
522 ip6_flush_pending_frames(sk);
524 err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
525 len + sizeof(struct icmp6hdr));
531 icmpv6_xmit_unlock(sk);
534 /* Slightly more convenient version of icmp6_send.
/* Send an ICMPV6_PARAMPROB error pointing at byte offset @pos of the
 * offending packet. */
536 void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos)
538 icmp6_send(skb, ICMPV6_PARAMPROB, code, pos);
/* Answer an ICMPv6 Echo Request: reuse the request header with the
 * type flipped to ECHO_REPLY, swap the addresses into the flow, and
 * echo the payload back via ip6_append_data(). Replies only to
 * unicast destinations (or anycast if the sysctl allows it). */
542 static void icmpv6_echo_reply(struct sk_buff *skb)
544 struct net *net = dev_net(skb->dev);
546 struct inet6_dev *idev;
547 struct ipv6_pinfo *np;
548 const struct in6_addr *saddr = NULL;
549 struct icmp6hdr *icmph = icmp6_hdr(skb);
550 struct icmp6hdr tmp_hdr;
552 struct icmpv6_msg msg;
553 struct dst_entry *dst;
/* Source of the reply is the destination of the request. */
557 saddr = &ipv6_hdr(skb)->daddr;
559 if (!ipv6_unicast_destination(skb) &&
560 !(net->ipv6.anycast_src_echo_reply &&
561 ipv6_anycast_destination(skb)))
/* Copy the request header (identifier/sequence) and flip the type. */
564 memcpy(&tmp_hdr, icmph, sizeof(tmp_hdr));
565 tmp_hdr.icmp6_type = ICMPV6_ECHO_REPLY;
567 memset(&fl6, 0, sizeof(fl6));
568 fl6.flowi6_proto = IPPROTO_ICMPV6;
569 fl6.daddr = ipv6_hdr(skb)->saddr;
572 fl6.flowi6_oif = skb->dev->ifindex;
573 fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY;
574 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
576 sk = icmpv6_xmit_lock(net);
581 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
582 fl6.flowi6_oif = np->mcast_oif;
583 else if (!fl6.flowi6_oif)
584 fl6.flowi6_oif = np->ucast_oif;
586 err = ip6_dst_lookup(sk, &dst, &fl6);
589 dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0);
/* Hop limit: socket preference first, else derived from the route. */
593 if (ipv6_addr_is_multicast(&fl6.daddr))
594 hlimit = np->mcast_hops;
596 hlimit = np->hop_limit;
598 hlimit = ip6_dst_hoplimit(dst);
600 idev = __in6_dev_get(skb->dev);
604 msg.type = ICMPV6_ECHO_REPLY;
606 err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
607 sizeof(struct icmp6hdr), hlimit, np->tclass, NULL, &fl6,
608 (struct rt6_info *)dst, MSG_DONTWAIT,
/* On append failure, count the error and drop the queued frames. */
612 ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
613 ip6_flush_pending_frames(sk);
615 err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
616 skb->len + sizeof(struct icmp6hdr));
620 icmpv6_xmit_unlock(sk);
/* Deliver a received ICMPv6 error to the upper-layer protocol the
 * embedded (offending) packet belonged to: skip the inner IPv6
 * extension headers, then call the matching inet6_protocol
 * err_handler and notify raw sockets. */
623 void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
625 const struct inet6_protocol *ipprot;
630 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
633 nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr;
634 if (ipv6_ext_hdr(nexthdr)) {
635 /* now skip over extension headers */
636 inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
637 &nexthdr, &frag_off);
641 inner_offset = sizeof(struct ipv6hdr);
644 /* Check header including 8 bytes of inner protocol header. */
645 if (!pskb_may_pull(skb, inner_offset+8))
648 /* BUGGG_FUTURE: we should try to parse exthdrs in this packet.
649 Without this we will not able f.e. to make source routed
651 Corresponding argument (opt) to notifiers is already added.
/* Dispatch to the registered protocol's error handler, if any. */
656 ipprot = rcu_dereference(inet6_protos[nexthdr]);
657 if (ipprot && ipprot->err_handler)
658 ipprot->err_handler(skb, NULL, type, code, inner_offset, info);
661 raw6_icmp_error(skb, nexthdr, type, code, inner_offset, info);
665 * Handle icmp messages
/* Main ICMPv6 input handler: validates xfrm policy and the ICMPv6
 * checksum, updates SNMP counters, then dispatches on message type
 * (echo, NDISC, MLD, errors forwarded via icmpv6_notify, ...). */
668 static int icmpv6_rcv(struct sk_buff *skb)
670 struct net_device *dev = skb->dev;
671 struct inet6_dev *idev = __in6_dev_get(dev);
672 const struct in6_addr *saddr, *daddr;
673 struct icmp6hdr *hdr;
676 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
677 struct sec_path *sp = skb_sec_path(skb);
680 if (!(sp && sp->xvec[sp->len - 1]->props.flags &
684 if (!pskb_may_pull(skb, sizeof(*hdr) + sizeof(struct ipv6hdr)))
/* Temporarily move the network header to run the reverse policy
 * check against the inner packet, then restore it. */
687 nh = skb_network_offset(skb);
688 skb_set_network_header(skb, sizeof(*hdr));
690 if (!xfrm6_policy_check_reverse(NULL, XFRM_POLICY_IN, skb))
693 skb_set_network_header(skb, nh);
696 ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_INMSGS);
698 saddr = &ipv6_hdr(skb)->saddr;
699 daddr = &ipv6_hdr(skb)->daddr;
701 /* Perform checksum. */
702 switch (skb->ip_summed) {
703 case CHECKSUM_COMPLETE:
704 if (!csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6,
/* No hardware checksum: compute it in software. */
709 skb->csum = ~csum_unfold(csum_ipv6_magic(saddr, daddr, skb->len,
711 if (__skb_checksum_complete(skb)) {
712 LIMIT_NETDEBUG(KERN_DEBUG
713 "ICMPv6 checksum failed [%pI6c > %pI6c]\n",
719 if (!pskb_pull(skb, sizeof(*hdr)))
722 hdr = icmp6_hdr(skb);
724 type = hdr->icmp6_type;
726 ICMP6MSGIN_INC_STATS_BH(dev_net(dev), idev, type);
729 case ICMPV6_ECHO_REQUEST:
730 icmpv6_echo_reply(skb);
733 case ICMPV6_ECHO_REPLY:
737 case ICMPV6_PKT_TOOBIG:
738 /* BUGGG_FUTURE: if packet contains rthdr, we cannot update
739 standard destination cache. Seems, only "advanced"
740 destination cache will allow to solve this problem
743 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
745 hdr = icmp6_hdr(skb);
748 * Drop through to notify
751 case ICMPV6_DEST_UNREACH:
752 case ICMPV6_TIME_EXCEED:
753 case ICMPV6_PARAMPROB:
754 icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
/* Neighbour discovery messages are handled by the NDISC layer. */
757 case NDISC_ROUTER_SOLICITATION:
758 case NDISC_ROUTER_ADVERTISEMENT:
759 case NDISC_NEIGHBOUR_SOLICITATION:
760 case NDISC_NEIGHBOUR_ADVERTISEMENT:
765 case ICMPV6_MGM_QUERY:
766 igmp6_event_query(skb);
769 case ICMPV6_MGM_REPORT:
770 igmp6_event_report(skb);
/* Known but unhandled informational types: silently accepted. */
773 case ICMPV6_MGM_REDUCTION:
774 case ICMPV6_NI_QUERY:
775 case ICMPV6_NI_REPLY:
776 case ICMPV6_MLD2_REPORT:
777 case ICMPV6_DHAAD_REQUEST:
778 case ICMPV6_DHAAD_REPLY:
779 case ICMPV6_MOBILE_PREFIX_SOL:
780 case ICMPV6_MOBILE_PREFIX_ADV:
784 LIMIT_NETDEBUG(KERN_DEBUG "icmpv6: msg of unknown type\n");
787 if (type & ICMPV6_INFOMSG_MASK)
791 * error of unknown type.
792 * must pass to upper level
795 icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
/* Error-path accounting. */
802 ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_CSUMERRORS);
804 ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_INERRORS);
/* Initialize a flowi6 for an outgoing ICMPv6 message of @type between
 * @saddr and @daddr on interface @oif, and classify it for LSM/xfrm. */
810 void icmpv6_flow_init(struct sock *sk, struct flowi6 *fl6,
812 const struct in6_addr *saddr,
813 const struct in6_addr *daddr,
816 memset(fl6, 0, sizeof(*fl6));
819 fl6->flowi6_proto = IPPROTO_ICMPV6;
820 fl6->fl6_icmp_type = type;
821 fl6->fl6_icmp_code = 0;
822 fl6->flowi6_oif = oif;
823 security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
827 * Special lock-class for __icmpv6_sk:
829 static struct lock_class_key icmpv6_socket_sk_dst_lock_key;
/* Per-netns init: allocate the icmp_sk array and create one kernel raw
 * ICMPv6 control socket per possible CPU. On failure, destroy the
 * sockets created so far and free the array. */
831 static int __net_init icmpv6_sk_init(struct net *net)
837 kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL);
838 if (net->ipv6.icmp_sk == NULL)
841 for_each_possible_cpu(i) {
842 err = inet_ctl_sock_create(&sk, PF_INET6,
843 SOCK_RAW, IPPROTO_ICMPV6, net);
845 pr_err("Failed to initialize the ICMP6 control socket (err %d)\n",
850 net->ipv6.icmp_sk[i] = sk;
853 * Split off their lock-class, because sk->sk_dst_lock
854 * gets used from softirqs, which is safe for
855 * __icmpv6_sk (because those never get directly used
856 * via userspace syscalls), but unsafe for normal sockets.
858 lockdep_set_class(&sk->sk_dst_lock,
859 &icmpv6_socket_sk_dst_lock_key);
861 /* Enough space for 2 64K ICMP packets, including
862 * sk_buff struct overhead.
864 sk->sk_sndbuf = 2 * SKB_TRUESIZE(64 * 1024);
/* Error unwind: tear down the sockets created before the failure. */
869 for (j = 0; j < i; j++)
870 inet_ctl_sock_destroy(net->ipv6.icmp_sk[j]);
871 kfree(net->ipv6.icmp_sk);
/* Per-netns teardown: destroy every per-CPU control socket and free
 * the icmp_sk array allocated by icmpv6_sk_init(). */
875 static void __net_exit icmpv6_sk_exit(struct net *net)
879 for_each_possible_cpu(i) {
880 inet_ctl_sock_destroy(net->ipv6.icmp_sk[i]);
882 kfree(net->ipv6.icmp_sk);
/* pernet hooks so each network namespace gets its own control sockets. */
885 static struct pernet_operations icmpv6_sk_ops = {
886 .init = icmpv6_sk_init,
887 .exit = icmpv6_sk_exit,
/* Module init: register the pernet socket ops, hook IPPROTO_ICMPV6
 * into the inet6 protocol table, and register icmp6_send as the
 * ICMPv6 error sender. Unwinds in reverse order on failure. */
890 int __init icmpv6_init(void)
894 err = register_pernet_subsys(&icmpv6_sk_ops);
899 if (inet6_add_protocol(&icmpv6_protocol, IPPROTO_ICMPV6) < 0)
902 err = inet6_register_icmp_sender(icmp6_send);
/* Error unwind labels (elided in this listing). */
908 inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
910 pr_err("Failed to register ICMP6 protocol\n");
911 unregister_pernet_subsys(&icmpv6_sk_ops);
/* Module teardown: undo icmpv6_init() registrations in reverse order. */
915 void icmpv6_cleanup(void)
917 inet6_unregister_icmp_sender(icmp6_send);
918 unregister_pernet_subsys(&icmpv6_sk_ops);
919 inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
/* Table mapping ICMPV6_DEST_UNREACH codes to errno values plus a
 * "fatal" flag (entries mostly elided in this listing). */
923 static const struct icmp6_err {
931 { /* ADM_PROHIBITED */
935 { /* Was NOT_NEIGHBOUR, now reserved */
/* Convert an ICMPv6 (type, code) pair into an errno in *err; returns
 * nonzero ("fatal") when the error should abort the connection. */
957 int icmpv6_err_convert(u8 type, u8 code, int *err)
964 case ICMPV6_DEST_UNREACH:
/* Bounds-check the code before indexing the lookup table. */
966 if (code < ARRAY_SIZE(tab_unreach)) {
967 *err = tab_unreach[code].err;
968 fatal = tab_unreach[code].fatal;
972 case ICMPV6_PKT_TOOBIG:
976 case ICMPV6_PARAMPROB:
981 case ICMPV6_TIME_EXCEED:
988 EXPORT_SYMBOL(icmpv6_err_convert);
/* sysctl template for net.ipv6.icmp.ratelimit; .data is re-pointed at
 * each netns's own icmpv6_time in ipv6_icmp_sysctl_init(). */
991 static struct ctl_table ipv6_icmp_table_template[] = {
993 .procname = "ratelimit",
994 .data = &init_net.ipv6.sysctl.icmpv6_time,
995 .maxlen = sizeof(int),
997 .proc_handler = proc_dointvec_ms_jiffies,
1002 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
1004 struct ctl_table *table;
1006 table = kmemdup(ipv6_icmp_table_template,
1007 sizeof(ipv6_icmp_table_template),
1011 table[0].data = &net->ipv6.sysctl.icmpv6_time;