2 * NET3 IP device support routines.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
9 * Derived from the IP parts of dev.c 1.0.19
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
15 * Alan Cox, <gw4pts@gw4pts.ampr.org>
16 * Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
19 * Alexey Kuznetsov: pa_* fields are replaced with ifaddr
21 * Cyrus Durgin: updated for kmod
22 * Matthias Andree: in devinet_ioctl, compare label and
23 * address (4.4BSD alias style support),
24 * fall back to comparing just the label
29 #include <asm/uaccess.h>
30 #include <linux/bitops.h>
31 #include <linux/capability.h>
32 #include <linux/module.h>
33 #include <linux/types.h>
34 #include <linux/kernel.h>
35 #include <linux/string.h>
37 #include <linux/socket.h>
38 #include <linux/sockios.h>
40 #include <linux/errno.h>
41 #include <linux/interrupt.h>
42 #include <linux/if_addr.h>
43 #include <linux/if_ether.h>
44 #include <linux/inet.h>
45 #include <linux/netdevice.h>
46 #include <linux/etherdevice.h>
47 #include <linux/skbuff.h>
48 #include <linux/init.h>
49 #include <linux/notifier.h>
50 #include <linux/inetdevice.h>
51 #include <linux/igmp.h>
52 #include <linux/slab.h>
53 #include <linux/hash.h>
55 #include <linux/sysctl.h>
57 #include <linux/kmod.h>
58 #include <linux/netconf.h>
63 #include <net/route.h>
64 #include <net/ip_fib.h>
65 #include <net/rtnetlink.h>
66 #include <net/net_namespace.h>
67 #include <net/addrconf.h>
69 #include "fib_lookup.h"
/* System-wide ("all") IPv4 per-device config defaults: ICMP redirects
 * accepted/sent, secure redirects, shared media on; IGMP unsolicited
 * report intervals are in milliseconds.
 * NOTE(review): listing is line-sampled; the .data wrapper and closing
 * brace of this initializer are not visible here.
 */
71 static struct ipv4_devconf ipv4_devconf = {
73 [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
74 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
75 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
76 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
77 [IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
78 [IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] = 1000 /*ms*/,
/* Template copied into each newly created in_device (see inetdev_init);
 * differs from ipv4_devconf above by also enabling ACCEPT_SOURCE_ROUTE. */
82 static struct ipv4_devconf ipv4_devconf_dflt = {
84 [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
85 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
86 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
87 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
88 [IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE - 1] = 1,
89 [IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
90 [IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] = 1000 /*ms*/,
/* Accessor for a netns's default devconf attribute. */
94 #define IPV4_DEVCONF_DFLT(net, attr) \
95 IPV4_DEVCONF((*net->ipv4.devconf_dflt), attr)
/* Netlink attribute validation policy for RTM_NEWADDR/RTM_DELADDR
 * (addresses are raw u32s in network byte order; label is bounded
 * by IFNAMSIZ). */
97 static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
98 [IFA_LOCAL] = { .type = NLA_U32 },
99 [IFA_ADDRESS] = { .type = NLA_U32 },
100 [IFA_BROADCAST] = { .type = NLA_U32 },
101 [IFA_LABEL] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
102 [IFA_CACHEINFO] = { .len = sizeof(struct ifa_cacheinfo) },
103 [IFA_FLAGS] = { .type = NLA_U32 },
/* Global 256-bucket hash of all in_ifaddr entries, keyed on the local
 * address (see inet_addr_hash below); traversed under RCU. */
106 #define IN4_ADDR_HSIZE_SHIFT 8
107 #define IN4_ADDR_HSIZE (1U << IN4_ADDR_HSIZE_SHIFT)
109 static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE];
/* Bucket index for @addr in inet_addr_lst; mixes in the netns so the
 * same address in different namespaces spreads across buckets. */
111 static u32 inet_addr_hash(const struct net *net, __be32 addr)
113 u32 val = (__force u32) addr ^ net_hash_mix(net);
115 return hash_32(val, IN4_ADDR_HSIZE_SHIFT);
/* Link @ifa into the global address hash, keyed on ifa_local (RCU-safe
 * insertion; readers may be walking the chain concurrently). */
118 static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
120 u32 hash = inet_addr_hash(net, ifa->ifa_local);
123 hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]);
/* Unlink @ifa from the global address hash (RCU removal; node is
 * re-initialized so a double remove is harmless). */
126 static void inet_hash_remove(struct in_ifaddr *ifa)
129 hlist_del_init_rcu(&ifa->hash);
133 * __ip_dev_find - find the first device with a given source address.
134 * @net: the net namespace
135 * @addr: the source address
136 * @devref: if true, take a reference on the found device
138 * If a caller uses devref=false, it should be protected by RCU, or RTNL
/* Find the device owning source address @addr in @net: first via the
 * global address hash, then falling back to a FIB local-table lookup so
 * loopback-subnet addresses resolve too. Takes a device reference only
 * when @devref is true; otherwise the caller must hold RCU or RTNL.
 * NOTE(review): listing is line-sampled — the rcu_read_lock/unlock pair
 * and the hash-hit exit path are among the elided lines.
 */
140 struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
142 u32 hash = inet_addr_hash(net, addr);
143 struct net_device *result = NULL;
144 struct in_ifaddr *ifa;
147 hlist_for_each_entry_rcu(ifa, &inet_addr_lst[hash], hash) {
148 if (ifa->ifa_local == addr) {
149 struct net_device *dev = ifa->ifa_dev->dev;
/* skip entries that belong to other network namespaces */
151 if (!net_eq(dev_net(dev), net))
158 struct flowi4 fl4 = { .daddr = addr };
159 struct fib_result res = { 0 };
160 struct fib_table *local;
162 /* Fallback to FIB local table so that communication
163 * over loopback subnets work.
165 local = fib_get_table(net, RT_TABLE_LOCAL);
167 !fib_table_lookup(local, &fl4, &res, FIB_LOOKUP_NOREF) &&
168 res.type == RTN_LOCAL)
169 result = FIB_RES_DEV(res);
171 if (result && devref)
176 EXPORT_SYMBOL(__ip_dev_find);
/* Forward declarations and the inetaddr notifier chain head. The
 * devinet_sysctl_register/unregister pair appears both as prototypes and
 * as (empty-bodied) definitions here — presumably the two forms sit in
 * opposite arms of a CONFIG_SYSCTL #ifdef elided from this listing;
 * TODO(review): confirm against the full file. */
178 static void rtmsg_ifa(int event, struct in_ifaddr *, struct nlmsghdr *, u32);
180 static BLOCKING_NOTIFIER_HEAD(inetaddr_chain);
181 static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
184 static int devinet_sysctl_register(struct in_device *idev);
185 static void devinet_sysctl_unregister(struct in_device *idev);
187 static int devinet_sysctl_register(struct in_device *idev)
191 static void devinet_sysctl_unregister(struct in_device *idev)
196 /* Locks all the inet devices. */
/* Allocate a zeroed in_ifaddr; may sleep (GFP_KERNEL). Freed via
 * inet_free_ifa / RCU below. */
198 static struct in_ifaddr *inet_alloc_ifa(void)
200 return kzalloc(sizeof(struct in_ifaddr), GFP_KERNEL);
/* RCU callback: drop the in_device reference held by the ifaddr, then
 * (in an elided line) free the ifaddr itself. */
203 static void inet_rcu_free_ifa(struct rcu_head *head)
205 struct in_ifaddr *ifa = container_of(head, struct in_ifaddr, rcu_head);
207 in_dev_put(ifa->ifa_dev);
/* Defer freeing of @ifa until after a grace period, since readers may
 * still hold RCU references from the address hash. */
211 static void inet_free_ifa(struct in_ifaddr *ifa)
213 call_rcu(&ifa->rcu_head, inet_rcu_free_ifa);
/* Final teardown of an in_device once its refcount drops: sanity-check
 * that no addresses or multicast state remain, free the mc hash, and
 * (on elided lines) drop the netdev reference and free the structure. */
216 void in_dev_finish_destroy(struct in_device *idev)
218 struct net_device *dev = idev->dev;
220 WARN_ON(idev->ifa_list);
221 WARN_ON(idev->mc_list);
222 kfree(rcu_dereference_protected(idev->mc_hash, 1));
223 #ifdef NET_REFCNT_DEBUG
224 pr_debug("%s: %p=%s\n", __func__, idev, dev ? dev->name : "NIL");
/* reached only when the device is still alive — indicates a refcount bug */
228 pr_err("Freeing alive in_device %p\n", idev);
232 EXPORT_SYMBOL(in_dev_finish_destroy);
/* Create and attach the IPv4 in_device for a newly registered netdev:
 * seed its config from the netns defaults, allocate ARP parms, register
 * sysctls, init multicast state, and finally publish it via dev->ip_ptr
 * (packets can be received the moment ip_ptr is visible).
 * Returns the in_device, or an ERR_PTR on failure (see the `?:` return).
 * NOTE(review): error-path labels/gotos are elided from this listing.
 */
234 static struct in_device *inetdev_init(struct net_device *dev)
236 struct in_device *in_dev;
241 in_dev = kzalloc(sizeof(*in_dev), GFP_KERNEL);
244 memcpy(&in_dev->cnf, dev_net(dev)->ipv4.devconf_dflt,
245 sizeof(in_dev->cnf));
246 in_dev->cnf.sysctl = NULL;
248 in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl);
249 if (!in_dev->arp_parms)
/* LRO is incompatible with forwarding: turn it off if forwarding is on */
251 if (IPV4_DEVCONF(in_dev->cnf, FORWARDING))
252 dev_disable_lro(dev);
253 /* Reference in_dev->dev */
255 /* Account for reference dev->ip_ptr (below) */
258 err = devinet_sysctl_register(in_dev);
265 ip_mc_init_dev(in_dev);
266 if (dev->flags & IFF_UP)
269 /* we can receive as soon as ip_ptr is set -- do this last */
270 rcu_assign_pointer(dev->ip_ptr, in_dev);
272 return in_dev ?: ERR_PTR(err);
/* RCU callback used by inetdev_destroy: recovers the in_device and (on
 * an elided line) drops the reference taken for dev->ip_ptr. */
279 static void in_dev_rcu_put(struct rcu_head *head)
281 struct in_device *idev = container_of(head, struct in_device, rcu_head);
/* Tear down a device's IPv4 state on unregister: destroy multicast
 * state, delete every remaining address, clear dev->ip_ptr, remove
 * sysctls and ARP parms, and release the in_device after an RCU grace
 * period. */
285 static void inetdev_destroy(struct in_device *in_dev)
287 struct in_ifaddr *ifa;
288 struct net_device *dev;
296 ip_mc_destroy_dev(in_dev);
298 while ((ifa = in_dev->ifa_list) != NULL) {
299 inet_del_ifa(in_dev, &in_dev->ifa_list, 0);
303 RCU_INIT_POINTER(dev->ip_ptr, NULL);
305 devinet_sysctl_unregister(in_dev);
306 neigh_parms_release(&arp_tbl, in_dev->arp_parms);
309 call_rcu(&in_dev->rcu_head, in_dev_rcu_put);
/* Return whether @a (and, if nonzero, @b) fall inside the subnet of any
 * primary address configured on @in_dev. */
312 int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b)
315 for_primary_ifa(in_dev) {
316 if (inet_ifa_match(a, ifa)) {
317 if (!b || inet_ifa_match(b, ifa)) {
322 } endfor_ifa(in_dev);
/* Remove the address *@ifap from @in_dev. Deleting a primary address
 * also deletes its secondaries, unless promote_secondaries is enabled,
 * in which case one secondary is promoted to primary and the rest are
 * re-announced under it. Sends RTM_DELADDR/RTM_NEWADDR netlink messages
 * and fires the inetaddr notifier chain (message deliberately sent
 * BEFORE the notifier — see the in-line rationale below).
 * @destroy: nonzero to actually free the removed ifaddr(s).
 * NOTE(review): listing is line-sampled; selection of `promote`,
 * `prev_prom`/`last_prim` tracking, and several closing braces are
 * elided, so the control flow here is partial.
 */
327 static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
328 int destroy, struct nlmsghdr *nlh, u32 portid)
330 struct in_ifaddr *promote = NULL;
331 struct in_ifaddr *ifa, *ifa1 = *ifap;
332 struct in_ifaddr *last_prim = in_dev->ifa_list;
333 struct in_ifaddr *prev_prom = NULL;
334 int do_promote = IN_DEV_PROMOTE_SECONDARIES(in_dev);
341 /* 1. Deleting primary ifaddr forces deletion all secondaries
342 * unless alias promotion is set
345 if (!(ifa1->ifa_flags & IFA_F_SECONDARY)) {
346 struct in_ifaddr **ifap1 = &ifa1->ifa_next;
348 while ((ifa = *ifap1) != NULL) {
349 if (!(ifa->ifa_flags & IFA_F_SECONDARY) &&
350 ifa1->ifa_scope <= ifa->ifa_scope)
/* keep addresses that are not secondaries of ifa1's subnet */
353 if (!(ifa->ifa_flags & IFA_F_SECONDARY) ||
354 ifa1->ifa_mask != ifa->ifa_mask ||
355 !inet_ifa_match(ifa1->ifa_address, ifa)) {
356 ifap1 = &ifa->ifa_next;
362 inet_hash_remove(ifa);
363 *ifap1 = ifa->ifa_next;
365 rtmsg_ifa(RTM_DELADDR, ifa, nlh, portid);
366 blocking_notifier_call_chain(&inetaddr_chain,
376 /* On promotion all secondaries from subnet are changing
377 * the primary IP, we must remove all their routes silently
378 * and later to add them back with new prefsrc. Do this
379 * while all addresses are on the device list.
381 for (ifa = promote; ifa; ifa = ifa->ifa_next) {
382 if (ifa1->ifa_mask == ifa->ifa_mask &&
383 inet_ifa_match(ifa1->ifa_address, ifa))
384 fib_del_ifaddr(ifa, ifa1);
/* 2. Unlink ifa1 itself from the device list and global hash */
390 *ifap = ifa1->ifa_next;
391 inet_hash_remove(ifa1);
393 /* 3. Announce address deletion */
395 /* Send message first, then call notifier.
396 At first sight, FIB update triggered by notifier
397 will refer to already deleted ifaddr, that could confuse
398 netlink listeners. It is not true: look, gated sees
399 that route deleted and if it still thinks that ifaddr
400 is valid, it will try to restore deleted routes... Grr.
401 So that, this order is correct.
403 rtmsg_ifa(RTM_DELADDR, ifa1, nlh, portid);
404 blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1);
/* 4. Promote chosen secondary: splice it in after the last primary */
407 struct in_ifaddr *next_sec = promote->ifa_next;
410 prev_prom->ifa_next = promote->ifa_next;
411 promote->ifa_next = last_prim->ifa_next;
412 last_prim->ifa_next = promote;
415 promote->ifa_flags &= ~IFA_F_SECONDARY;
416 rtmsg_ifa(RTM_NEWADDR, promote, nlh, portid);
417 blocking_notifier_call_chain(&inetaddr_chain,
/* re-add FIB entries for remaining same-subnet secondaries (elided body) */
419 for (ifa = next_sec; ifa; ifa = ifa->ifa_next) {
420 if (ifa1->ifa_mask != ifa->ifa_mask ||
421 !inet_ifa_match(ifa1->ifa_address, ifa))
/* Convenience wrapper around __inet_del_ifa with no originating netlink
 * message (nlh == NULL, portid == 0). */
431 static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
434 __inet_del_ifa(in_dev, ifap, destroy, NULL, 0);
/* Deferred work that expires/deprecates addresses by lifetime; defined
 * further below, rescheduled from __inet_insert_ifa and itself. */
437 static void check_lifetime(struct work_struct *work);
439 static DECLARE_DELAYED_WORK(check_lifetime_work, check_lifetime);
/* Insert @ifa into its device's address list, classifying it as primary
 * or secondary: an address whose subnet already has a primary becomes
 * IFA_F_SECONDARY; duplicates and scope mismatches are rejected on
 * (elided) error paths. On success the ifaddr enters the global hash,
 * lifetime checking is (re)scheduled, and RTM_NEWADDR plus the
 * NETDEV_UP notifier are emitted — message before notifier, same
 * rationale as in __inet_del_ifa. */
441 static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
444 struct in_device *in_dev = ifa->ifa_dev;
445 struct in_ifaddr *ifa1, **ifap, **last_primary;
/* an all-zero local address: free and bail (elided return) */
449 if (!ifa->ifa_local) {
454 ifa->ifa_flags &= ~IFA_F_SECONDARY;
455 last_primary = &in_dev->ifa_list;
457 for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL;
458 ifap = &ifa1->ifa_next) {
459 if (!(ifa1->ifa_flags & IFA_F_SECONDARY) &&
460 ifa->ifa_scope <= ifa1->ifa_scope)
461 last_primary = &ifa1->ifa_next;
462 if (ifa1->ifa_mask == ifa->ifa_mask &&
463 inet_ifa_match(ifa1->ifa_address, ifa)) {
464 if (ifa1->ifa_local == ifa->ifa_local) {
468 if (ifa1->ifa_scope != ifa->ifa_scope) {
472 ifa->ifa_flags |= IFA_F_SECONDARY;
476 if (!(ifa->ifa_flags & IFA_F_SECONDARY)) {
/* feed the address into the PRNG pool; primaries insert at last_primary */
477 prandom_seed((__force u32) ifa->ifa_local);
481 ifa->ifa_next = *ifap;
484 inet_hash_insert(dev_net(in_dev->dev), ifa);
486 cancel_delayed_work(&check_lifetime_work);
487 queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);
489 /* Send message first, then call notifier.
490 Notifier will trigger FIB update, so that
491 listeners of netlink will know about new ifaddr */
492 rtmsg_ifa(RTM_NEWADDR, ifa, nlh, portid);
493 blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
/* Insert without an originating netlink request (nlh NULL, portid 0). */
498 static int inet_insert_ifa(struct in_ifaddr *ifa)
500 return __inet_insert_ifa(ifa, NULL, 0);
/* Bind @ifa to @dev's in_device (taking over from any previous binding —
 * WARN if one existed) and insert it; loopback locals are forced to
 * host scope. Called from devinet_ioctl's SIOCSIFADDR path. */
503 static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
505 struct in_device *in_dev = __in_dev_get_rtnl(dev);
513 ipv4_devconf_setall(in_dev);
514 neigh_parms_data_state_setall(in_dev->arp_parms);
515 if (ifa->ifa_dev != in_dev) {
516 WARN_ON(ifa->ifa_dev);
518 ifa->ifa_dev = in_dev;
520 if (ipv4_is_loopback(ifa->ifa_local))
521 ifa->ifa_scope = RT_SCOPE_HOST;
522 return inet_insert_ifa(ifa);
525 /* Caller must hold RCU or RTNL :
526 * We dont take a reference on found in_device
/* Look up the in_device for @ifindex in @net; returns NULL if the
 * device does not exist. No reference is taken — caller must hold RCU
 * or RTNL (per the comment preceding this function). */
528 struct in_device *inetdev_by_index(struct net *net, int ifindex)
530 struct net_device *dev;
531 struct in_device *in_dev = NULL;
534 dev = dev_get_by_index_rcu(net, ifindex);
536 in_dev = rcu_dereference_rtnl(dev->ip_ptr);
540 EXPORT_SYMBOL(inetdev_by_index);
542 /* Called only from RTNL semaphored context. No locks. */
/* Return the first primary address on @in_dev whose mask equals @mask
 * and whose subnet contains @prefix; NULL if none. RTNL-only context. */
544 struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
549 for_primary_ifa(in_dev) {
550 if (ifa->ifa_mask == mask && inet_ifa_match(prefix, ifa))
552 } endfor_ifa(in_dev);
/* Join (@join true) or leave the multicast group given by the ifaddr's
 * address on its interface, using the per-netns autojoin socket. */
556 static int ip_mc_config(struct sock *sk, bool join, const struct in_ifaddr *ifa)
558 struct ip_mreqn mreq = {
559 .imr_multiaddr.s_addr = ifa->ifa_address,
560 .imr_ifindex = ifa->ifa_dev->dev->ifindex,
568 ret = ip_mc_join_group(sk, &mreq);
570 ret = ip_mc_leave_group(sk, &mreq);
/* RTM_DELADDR handler: parse attributes, locate the device, then walk
 * its address list for an entry matching the supplied IFA_LOCAL,
 * IFA_LABEL and/or IFA_ADDRESS+prefixlen; delete the match (leaving an
 * autojoined multicast group first). Falls through to -EADDRNOTAVAIL if
 * nothing matched. */
576 static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
578 struct net *net = sock_net(skb->sk);
579 struct nlattr *tb[IFA_MAX+1];
580 struct in_device *in_dev;
581 struct ifaddrmsg *ifm;
582 struct in_ifaddr *ifa, **ifap;
587 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy);
591 ifm = nlmsg_data(nlh);
592 in_dev = inetdev_by_index(net, ifm->ifa_index);
598 for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
599 ifap = &ifa->ifa_next) {
601 ifa->ifa_local != nla_get_in_addr(tb[IFA_LOCAL]))
604 if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label))
607 if (tb[IFA_ADDRESS] &&
608 (ifm->ifa_prefixlen != ifa->ifa_prefixlen ||
609 !inet_ifa_match(nla_get_in_addr(tb[IFA_ADDRESS]), ifa)))
612 if (ipv4_is_multicast(ifa->ifa_address))
613 ip_mc_config(net->ipv4.mc_autojoin_sk, false, ifa);
614 __inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid);
618 err = -EADDRNOTAVAIL;
/* Sentinel lifetime meaning "never expires" (matches IPv6 addrconf). */
623 #define INFINITY_LIFE_TIME 0xFFFFFFFF
/* Periodic worker enforcing address lifetimes. Two passes per hash
 * bucket: a lockless RCU scan that decides whether any change is needed
 * (and computes the next wake-up time), then — only when needed — a
 * second pass (under RTNL, taken on elided lines) that actually deletes
 * expired addresses and marks preferred-expired ones IFA_F_DEPRECATED.
 * Reschedules itself, rounded to whole seconds when close enough and
 * clamped to a minimum interval. */
625 static void check_lifetime(struct work_struct *work)
627 unsigned long now, next, next_sec, next_sched;
628 struct in_ifaddr *ifa;
629 struct hlist_node *n;
633 next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
635 for (i = 0; i < IN4_ADDR_HSIZE; i++) {
636 bool change_needed = false;
/* pass 1: read-only scan under RCU */
639 hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) {
642 if (ifa->ifa_flags & IFA_F_PERMANENT)
645 /* We try to batch several events at once. */
646 age = (now - ifa->ifa_tstamp +
647 ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
649 if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
650 age >= ifa->ifa_valid_lft) {
651 change_needed = true;
652 } else if (ifa->ifa_preferred_lft ==
653 INFINITY_LIFE_TIME) {
655 } else if (age >= ifa->ifa_preferred_lft) {
656 if (time_before(ifa->ifa_tstamp +
657 ifa->ifa_valid_lft * HZ, next))
658 next = ifa->ifa_tstamp +
659 ifa->ifa_valid_lft * HZ;
661 if (!(ifa->ifa_flags & IFA_F_DEPRECATED))
662 change_needed = true;
663 } else if (time_before(ifa->ifa_tstamp +
664 ifa->ifa_preferred_lft * HZ,
666 next = ifa->ifa_tstamp +
667 ifa->ifa_preferred_lft * HZ;
/* pass 2: mutating scan (safe iterator — entries may be deleted) */
674 hlist_for_each_entry_safe(ifa, n, &inet_addr_lst[i], hash) {
677 if (ifa->ifa_flags & IFA_F_PERMANENT)
680 /* We try to batch several events at once. */
681 age = (now - ifa->ifa_tstamp +
682 ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
684 if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
685 age >= ifa->ifa_valid_lft) {
686 struct in_ifaddr **ifap;
688 for (ifap = &ifa->ifa_dev->ifa_list;
689 *ifap != NULL; ifap = &(*ifap)->ifa_next) {
691 inet_del_ifa(ifa->ifa_dev,
696 } else if (ifa->ifa_preferred_lft !=
697 INFINITY_LIFE_TIME &&
698 age >= ifa->ifa_preferred_lft &&
699 !(ifa->ifa_flags & IFA_F_DEPRECATED)) {
700 ifa->ifa_flags |= IFA_F_DEPRECATED;
701 rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
707 next_sec = round_jiffies_up(next);
710 /* If rounded timeout is accurate enough, accept it. */
711 if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
712 next_sched = next_sec;
715 /* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
716 if (time_before(next_sched, now + ADDRCONF_TIMER_FUZZ_MAX))
717 next_sched = now + ADDRCONF_TIMER_FUZZ_MAX;
719 queue_delayed_work(system_power_efficient_wq, &check_lifetime_work,
/* Apply valid/preferred lifetimes to @ifa: infinite valid lifetime sets
 * IFA_F_PERMANENT, an already-elapsed preferred lifetime sets
 * IFA_F_DEPRECATED; timestamps are refreshed (cstamp only on first
 * assignment). */
723 static void set_ifa_lifetime(struct in_ifaddr *ifa, __u32 valid_lft,
726 unsigned long timeout;
728 ifa->ifa_flags &= ~(IFA_F_PERMANENT | IFA_F_DEPRECATED);
730 timeout = addrconf_timeout_fixup(valid_lft, HZ);
731 if (addrconf_finite_timeout(timeout))
732 ifa->ifa_valid_lft = timeout;
734 ifa->ifa_flags |= IFA_F_PERMANENT;
736 timeout = addrconf_timeout_fixup(prefered_lft, HZ);
737 if (addrconf_finite_timeout(timeout)) {
739 ifa->ifa_flags |= IFA_F_DEPRECATED;
740 ifa->ifa_preferred_lft = timeout;
742 ifa->ifa_tstamp = jiffies;
743 if (!ifa->ifa_cstamp)
744 ifa->ifa_cstamp = ifa->ifa_tstamp;
/* Build an in_ifaddr from an RTM_NEWADDR request: validate prefixlen
 * and required IFA_LOCAL, resolve the device, allocate the ifaddr, copy
 * addresses/flags/label from attributes (IFA_ADDRESS defaults to
 * IFA_LOCAL; label defaults to the device name), and extract optional
 * cacheinfo lifetimes into *pvalid_lft / *pprefered_lft.
 * Error paths (ERR_PTR returns) are on elided lines. */
747 static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
748 __u32 *pvalid_lft, __u32 *pprefered_lft)
750 struct nlattr *tb[IFA_MAX+1];
751 struct in_ifaddr *ifa;
752 struct ifaddrmsg *ifm;
753 struct net_device *dev;
754 struct in_device *in_dev;
757 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy);
761 ifm = nlmsg_data(nlh);
763 if (ifm->ifa_prefixlen > 32 || !tb[IFA_LOCAL])
766 dev = __dev_get_by_index(net, ifm->ifa_index);
771 in_dev = __in_dev_get_rtnl(dev);
776 ifa = inet_alloc_ifa();
779 * A potential indev allocation can be left alive, it stays
780 * assigned to its device and is destroy with it.
784 ipv4_devconf_setall(in_dev);
785 neigh_parms_data_state_setall(in_dev->arp_parms);
788 if (!tb[IFA_ADDRESS])
789 tb[IFA_ADDRESS] = tb[IFA_LOCAL];
791 INIT_HLIST_NODE(&ifa->hash);
792 ifa->ifa_prefixlen = ifm->ifa_prefixlen;
793 ifa->ifa_mask = inet_make_mask(ifm->ifa_prefixlen);
794 ifa->ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) :
796 ifa->ifa_scope = ifm->ifa_scope;
797 ifa->ifa_dev = in_dev;
799 ifa->ifa_local = nla_get_in_addr(tb[IFA_LOCAL]);
800 ifa->ifa_address = nla_get_in_addr(tb[IFA_ADDRESS]);
802 if (tb[IFA_BROADCAST])
803 ifa->ifa_broadcast = nla_get_in_addr(tb[IFA_BROADCAST]);
806 nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ);
808 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
810 if (tb[IFA_CACHEINFO]) {
811 struct ifa_cacheinfo *ci;
813 ci = nla_data(tb[IFA_CACHEINFO]);
/* preferred lifetime may not exceed valid lifetime */
814 if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) {
818 *pvalid_lft = ci->ifa_valid;
819 *pprefered_lft = ci->ifa_prefered;
/* Return an existing address on the same device matching @ifa's mask,
 * subnet and local address (used by inet_rtm_newaddr to distinguish
 * create from replace); NULL if none. */
830 static struct in_ifaddr *find_matching_ifa(struct in_ifaddr *ifa)
832 struct in_device *in_dev = ifa->ifa_dev;
833 struct in_ifaddr *ifa1, **ifap;
838 for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL;
839 ifap = &ifa1->ifa_next) {
840 if (ifa1->ifa_mask == ifa->ifa_mask &&
841 inet_ifa_match(ifa1->ifa_address, ifa) &&
842 ifa1->ifa_local == ifa->ifa_local)
/* RTM_NEWADDR handler: build the candidate ifaddr, then either insert
 * it as new (handling IFA_F_MCAUTOJOIN by joining the multicast group
 * first) or — when an identical address already exists — treat the
 * request as a lifetime update, honoring NLM_F_EXCL/NLM_F_REPLACE. */
848 static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
850 struct net *net = sock_net(skb->sk);
851 struct in_ifaddr *ifa;
852 struct in_ifaddr *ifa_existing;
853 __u32 valid_lft = INFINITY_LIFE_TIME;
854 __u32 prefered_lft = INFINITY_LIFE_TIME;
858 ifa = rtm_to_ifaddr(net, nlh, &valid_lft, &prefered_lft);
862 ifa_existing = find_matching_ifa(ifa);
864 /* It would be best to check for !NLM_F_CREATE here but
865 * userspace already relies on not having to provide this.
867 set_ifa_lifetime(ifa, valid_lft, prefered_lft);
868 if (ifa->ifa_flags & IFA_F_MCAUTOJOIN) {
869 int ret = ip_mc_config(net->ipv4.mc_autojoin_sk,
877 return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid);
/* existing address: only a lifetime refresh is allowed */
881 if (nlh->nlmsg_flags & NLM_F_EXCL ||
882 !(nlh->nlmsg_flags & NLM_F_REPLACE))
885 set_ifa_lifetime(ifa, valid_lft, prefered_lft);
886 cancel_delayed_work(&check_lifetime_work);
887 queue_delayed_work(system_power_efficient_wq,
888 &check_lifetime_work, 0);
889 rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
895 * Determine a default network mask, based on the IP address.
/* Classful default prefix length for @addr: 8/16/24 for class A/B/C,
 * 0 for the zero network, -1 otherwise (e.g. multicast). The actual
 * return statements are on elided lines. */
898 static int inet_abc_len(__be32 addr)
900 int rc = -1; /* Something else, probably a multicast. */
902 if (ipv4_is_zeronet(addr))
905 __u32 haddr = ntohl(addr);
907 if (IN_CLASSA(haddr))
909 else if (IN_CLASSB(haddr))
911 else if (IN_CLASSC(haddr))
/* Legacy SIOC[GS]IF* ioctl handler for IPv4 interface addresses.
 * Copies the ifreq from userspace, resolves the device (loading a
 * module by name if needed), locates the matching ifaddr by label and
 * — for AF_INET callers — by address (4.4BSD alias semantics, falling
 * back to label-only), then dispatches per command. Get commands write
 * the result back through the same ifreq. Set commands require
 * CAP_NET_ADMIN and an AF_INET sockaddr.
 * NOTE(review): this listing is line-sampled — the ifreq/colon
 * declarations, RTNL locking, the switch statements themselves, several
 * break/goto lines and the alias-colon restoration are elided, so only
 * the case bodies are fully visible here. SIOCKILLADDR/tcp_nuke_addr is
 * not an upstream ioctl — presumably an Android/vendor extension;
 * verify against the tree this belongs to.
 */
919 int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
922 struct sockaddr_in sin_orig;
923 struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;
924 struct in_device *in_dev;
925 struct in_ifaddr **ifap = NULL;
926 struct in_ifaddr *ifa = NULL;
927 struct net_device *dev;
930 int tryaddrmatch = 0;
933 * Fetch the caller's info block into kernel space
936 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
938 ifr.ifr_name[IFNAMSIZ - 1] = 0;
940 /* save original address for comparison */
941 memcpy(&sin_orig, sin, sizeof(*sin));
943 colon = strchr(ifr.ifr_name, ':');
947 dev_load(net, ifr.ifr_name);
950 case SIOCGIFADDR: /* Get interface address */
951 case SIOCGIFBRDADDR: /* Get the broadcast address */
952 case SIOCGIFDSTADDR: /* Get the destination address */
953 case SIOCGIFNETMASK: /* Get the netmask for the interface */
954 /* Note that these ioctls will not sleep,
955 so that we do not impose a lock.
956 One day we will be forced to put shlock here (I mean SMP)
958 tryaddrmatch = (sin_orig.sin_family == AF_INET);
959 memset(sin, 0, sizeof(*sin));
960 sin->sin_family = AF_INET;
965 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
968 case SIOCSIFADDR: /* Set interface address (and family) */
969 case SIOCSIFBRDADDR: /* Set the broadcast address */
970 case SIOCSIFDSTADDR: /* Set the destination address */
971 case SIOCSIFNETMASK: /* Set the netmask for the interface */
972 case SIOCKILLADDR: /* Nuke all sockets on this address */
974 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
977 if (sin->sin_family != AF_INET)
988 dev = __dev_get_by_name(net, ifr.ifr_name)
995 in_dev = __in_dev_get_rtnl(dev);
998 /* Matthias Andree */
999 /* compare label and address (4.4BSD style) */
1000 /* note: we only do this for a limited set of ioctls
1001 and only if the original address family was AF_INET.
1002 This is checked above. */
1003 for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
1004 ifap = &ifa->ifa_next) {
1005 if (!strcmp(ifr.ifr_name, ifa->ifa_label) &&
1006 sin_orig.sin_addr.s_addr ==
1012 /* we didn't get a match, maybe the application is
1013 4.3BSD-style and passed in junk so we fall back to
1014 comparing just the label */
1016 for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
1017 ifap = &ifa->ifa_next)
1018 if (!strcmp(ifr.ifr_name, ifa->ifa_label))
1023 ret = -EADDRNOTAVAIL;
1024 if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS
1025 && cmd != SIOCKILLADDR)
1029 case SIOCGIFADDR: /* Get interface address */
1030 sin->sin_addr.s_addr = ifa->ifa_local;
1033 case SIOCGIFBRDADDR: /* Get the broadcast address */
1034 sin->sin_addr.s_addr = ifa->ifa_broadcast;
1037 case SIOCGIFDSTADDR: /* Get the destination address */
1038 sin->sin_addr.s_addr = ifa->ifa_address;
1041 case SIOCGIFNETMASK: /* Get the netmask for the interface */
1042 sin->sin_addr.s_addr = ifa->ifa_mask;
1047 ret = -EADDRNOTAVAIL;
/* SIOCSIFFLAGS on an alias: dropping IFF_UP deletes the alias address */
1051 if (!(ifr.ifr_flags & IFF_UP))
1052 inet_del_ifa(in_dev, ifap, 1);
1055 ret = dev_change_flags(dev, ifr.ifr_flags);
1058 case SIOCSIFADDR: /* Set interface address (and family) */
1060 if (inet_abc_len(sin->sin_addr.s_addr) < 0)
1065 ifa = inet_alloc_ifa();
1068 INIT_HLIST_NODE(&ifa->hash);
1070 memcpy(ifa->ifa_label, ifr.ifr_name, IFNAMSIZ);
1072 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1075 if (ifa->ifa_local == sin->sin_addr.s_addr)
1077 inet_del_ifa(in_dev, ifap, 0);
1078 ifa->ifa_broadcast = 0;
1082 ifa->ifa_address = ifa->ifa_local = sin->sin_addr.s_addr;
/* derive classful mask/broadcast unless point-to-point (then /32) */
1084 if (!(dev->flags & IFF_POINTOPOINT)) {
1085 ifa->ifa_prefixlen = inet_abc_len(ifa->ifa_address);
1086 ifa->ifa_mask = inet_make_mask(ifa->ifa_prefixlen);
1087 if ((dev->flags & IFF_BROADCAST) &&
1088 ifa->ifa_prefixlen < 31)
1089 ifa->ifa_broadcast = ifa->ifa_address |
1092 ifa->ifa_prefixlen = 32;
1093 ifa->ifa_mask = inet_make_mask(32);
1095 set_ifa_lifetime(ifa, INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
1096 ret = inet_set_ifa(dev, ifa);
1099 case SIOCSIFBRDADDR: /* Set the broadcast address */
1101 if (ifa->ifa_broadcast != sin->sin_addr.s_addr) {
1102 inet_del_ifa(in_dev, ifap, 0);
1103 ifa->ifa_broadcast = sin->sin_addr.s_addr;
1104 inet_insert_ifa(ifa);
1108 case SIOCSIFDSTADDR: /* Set the destination address */
1110 if (ifa->ifa_address == sin->sin_addr.s_addr)
1113 if (inet_abc_len(sin->sin_addr.s_addr) < 0)
1116 inet_del_ifa(in_dev, ifap, 0);
1117 ifa->ifa_address = sin->sin_addr.s_addr;
1118 inet_insert_ifa(ifa);
1121 case SIOCSIFNETMASK: /* Set the netmask for the interface */
1124 * The mask we set must be legal.
1127 if (bad_mask(sin->sin_addr.s_addr, 0))
1130 if (ifa->ifa_mask != sin->sin_addr.s_addr) {
1131 __be32 old_mask = ifa->ifa_mask;
1132 inet_del_ifa(in_dev, ifap, 0);
1133 ifa->ifa_mask = sin->sin_addr.s_addr;
1134 ifa->ifa_prefixlen = inet_mask_len(ifa->ifa_mask);
1136 /* See if current broadcast address matches
1137 * with current netmask, then recalculate
1138 * the broadcast address. Otherwise it's a
1139 * funny address, so don't touch it since
1140 * the user seems to know what (s)he's doing...
1142 if ((dev->flags & IFF_BROADCAST) &&
1143 (ifa->ifa_prefixlen < 31) &&
1144 (ifa->ifa_broadcast ==
1145 (ifa->ifa_local|~old_mask))) {
1146 ifa->ifa_broadcast = (ifa->ifa_local |
1147 ~sin->sin_addr.s_addr);
1149 inet_insert_ifa(ifa);
1152 case SIOCKILLADDR: /* Nuke all connections on this address */
1153 ret = tcp_nuke_addr(net, (struct sockaddr *) sin);
1162 ret = copy_to_user(arg, &ifr, sizeof(struct ifreq)) ? -EFAULT : 0;
/* SIOCGIFCONF backend: copy one ifreq (label + AF_INET local address)
 * per address on @dev into the user buffer @buf, up to @len bytes.
 * When @buf is exhausted (or on the sizing pass) only `done` is
 * advanced. Returns the byte count (return on elided line). */
1166 static int inet_gifconf(struct net_device *dev, char __user *buf, int len)
1168 struct in_device *in_dev = __in_dev_get_rtnl(dev);
1169 struct in_ifaddr *ifa;
1176 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
1178 done += sizeof(ifr);
1181 if (len < (int) sizeof(ifr))
1183 memset(&ifr, 0, sizeof(struct ifreq));
1184 strcpy(ifr.ifr_name, ifa->ifa_label);
1186 (*(struct sockaddr_in *)&ifr.ifr_addr).sin_family = AF_INET;
1187 (*(struct sockaddr_in *)&ifr.ifr_addr).sin_addr.s_addr =
1190 if (copy_to_user(buf, &ifr, sizeof(struct ifreq))) {
1194 buf += sizeof(struct ifreq);
1195 len -= sizeof(struct ifreq);
1196 done += sizeof(struct ifreq);
/* Select a source address on @dev suitable for talking to @dst within
 * @scope: prefer a primary address whose subnet contains @dst, else the
 * last in-scope primary. If the device has none, scan all devices in
 * the netns for any non-link-scope address within @scope (loopback
 * being first in the netdev list is relied upon — see comment below).
 * Runs under RCU (lock/unlock on elided lines). */
1202 __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
1205 struct in_device *in_dev;
1206 struct net *net = dev_net(dev);
1209 in_dev = __in_dev_get_rcu(dev);
1213 for_primary_ifa(in_dev) {
1214 if (ifa->ifa_scope > scope)
1216 if (!dst || inet_ifa_match(dst, ifa)) {
1217 addr = ifa->ifa_local;
/* remember any in-scope address as a fallback */
1221 addr = ifa->ifa_local;
1222 } endfor_ifa(in_dev);
1228 /* Not loopback addresses on loopback should be preferred
1229 in this case. It is important that lo is the first interface
1232 for_each_netdev_rcu(net, dev) {
1233 in_dev = __in_dev_get_rcu(dev);
1237 for_primary_ifa(in_dev) {
1238 if (ifa->ifa_scope != RT_SCOPE_LINK &&
1239 ifa->ifa_scope <= scope) {
1240 addr = ifa->ifa_local;
1243 } endfor_ifa(in_dev);
1249 EXPORT_SYMBOL(inet_select_addr);
/* Helper for inet_confirm_addr: confirm on a single in_device that a
 * local address exists matching the (@dst, @local, @scope) wildcards;
 * returns the confirmed/selected address or 0.
 * NOTE(review): the surrounding ifa loop header and several branch
 * lines are elided from this listing. */
1251 static __be32 confirm_addr_indev(struct in_device *in_dev, __be32 dst,
1252 __be32 local, int scope)
1259 (local == ifa->ifa_local || !local) &&
1260 ifa->ifa_scope <= scope) {
1261 addr = ifa->ifa_local;
1266 same = (!local || inet_ifa_match(local, ifa)) &&
1267 (!dst || inet_ifa_match(dst, ifa));
1271 /* Is the selected addr into dst subnet? */
1272 if (inet_ifa_match(addr, ifa))
1274 /* No, then can we use new local src? */
1275 if (ifa->ifa_scope <= scope) {
1276 addr = ifa->ifa_local;
1279 /* search for large dst subnet for addr */
1283 } endfor_ifa(in_dev);
1285 return same ? addr : 0;
1289 * Confirm that local IP address exists using wildcards:
1290 * - net: netns to check, cannot be NULL
1291 * - in_dev: only on this interface, NULL=any interface
1292 * - dst: only in the same subnet as dst, 0=any dst
1293 * - local: address, 0=autoselect the local address
1294 * - scope: maximum allowed scope value for the local address
/* Confirm a local address exists using wildcards (see kdoc above):
 * restricted to @in_dev when given, otherwise scans every device in the
 * netns under RCU until one confirms. */
1296 __be32 inet_confirm_addr(struct net *net, struct in_device *in_dev,
1297 __be32 dst, __be32 local, int scope)
1300 struct net_device *dev;
1303 return confirm_addr_indev(in_dev, dst, local, scope);
1306 for_each_netdev_rcu(net, dev) {
1307 in_dev = __in_dev_get_rcu(dev);
1309 addr = confirm_addr_indev(in_dev, dst, local, scope);
1318 EXPORT_SYMBOL(inet_confirm_addr);
/* Subscribe @nb to IPv4 address add/remove events (NETDEV_UP/DOWN on
 * the blocking inetaddr_chain). */
1324 int register_inetaddr_notifier(struct notifier_block *nb)
1326 return blocking_notifier_chain_register(&inetaddr_chain, nb);
1328 EXPORT_SYMBOL(register_inetaddr_notifier);
/* Remove a subscriber previously added via register_inetaddr_notifier. */
1330 int unregister_inetaddr_notifier(struct notifier_block *nb)
1332 return blocking_notifier_chain_unregister(&inetaddr_chain, nb);
1334 EXPORT_SYMBOL(unregister_inetaddr_notifier);
1336 /* Rename ifa_labels for a device name change. Make some effort to preserve
1337 * existing alias numbering and to create unique labels if possible.
/* After a device rename, rewrite every address label to the new device
 * name, preserving any ":N" alias suffix (truncating the base name if
 * the combination would overflow IFNAMSIZ) and renumbering unnamed
 * aliases via `named` (declared on an elided line). Each rewritten
 * address is re-announced via RTM_NEWADDR. */
1339 static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
1341 struct in_ifaddr *ifa;
1344 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
1345 char old[IFNAMSIZ], *dot;
1347 memcpy(old, ifa->ifa_label, IFNAMSIZ);
1348 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1351 dot = strchr(old, ':');
/* no ':' in the old label: synthesize an alias suffix */
1353 sprintf(old, ":%d", named);
1356 if (strlen(dot) + strlen(dev->name) < IFNAMSIZ)
1357 strcat(ifa->ifa_label, dot);
1359 strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot);
1361 rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
/* True when @mtu is large enough to run IPv4 on (body elided from this
 * listing — presumably a minimum-MTU comparison; confirm in full file). */
1365 static bool inetdev_valid_mtu(unsigned int mtu)
/* Broadcast a gratuitous ARP (request with sender == target == our
 * local address) for every address on the device; used to announce
 * link/address changes to neighbors. */
1370 static void inetdev_send_gratuitous_arp(struct net_device *dev,
1371 struct in_device *in_dev)
1374 struct in_ifaddr *ifa;
1376 for (ifa = in_dev->ifa_list; ifa;
1377 ifa = ifa->ifa_next) {
1378 arp_send(ARPOP_REQUEST, ETH_P_ARP,
1379 ifa->ifa_local, dev,
1380 ifa->ifa_local, NULL,
1381 dev->dev_addr, NULL);
1385 /* Called only under RTNL semaphore */
/* Netdevice notifier: reacts to device lifecycle events under RTNL.
 * Creates the in_device on NETDEV_REGISTER (or on CHANGEMTU when the
 * MTU becomes valid again after IP was disabled), auto-configures
 * 127.0.0.1/8 when a loopback device comes up, sends gratuitous ARP on
 * address/peer-notify changes, destroys the in_device on UNREGISTER or
 * on a too-small MTU, and re-registers sysctls after a rename.
 * NOTE(review): the `goto out` lines, switch header, and several breaks
 * are elided from this listing.
 */
1387 static int inetdev_event(struct notifier_block *this, unsigned long event,
1390 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1391 struct in_device *in_dev = __in_dev_get_rtnl(dev);
/* no in_device yet: only REGISTER / MTU-revalidation create one */
1396 if (event == NETDEV_REGISTER) {
1397 in_dev = inetdev_init(dev);
1399 return notifier_from_errno(PTR_ERR(in_dev));
1400 if (dev->flags & IFF_LOOPBACK) {
1401 IN_DEV_CONF_SET(in_dev, NOXFRM, 1);
1402 IN_DEV_CONF_SET(in_dev, NOPOLICY, 1);
1404 } else if (event == NETDEV_CHANGEMTU) {
1405 /* Re-enabling IP */
1406 if (inetdev_valid_mtu(dev->mtu))
1407 in_dev = inetdev_init(dev);
1413 case NETDEV_REGISTER:
/* in_dev already present at REGISTER time is a bug */
1414 pr_debug("%s: bug\n", __func__);
1415 RCU_INIT_POINTER(dev->ip_ptr, NULL);
1418 if (!inetdev_valid_mtu(dev->mtu))
1420 if (dev->flags & IFF_LOOPBACK) {
1421 struct in_ifaddr *ifa = inet_alloc_ifa();
1424 INIT_HLIST_NODE(&ifa->hash);
1426 ifa->ifa_address = htonl(INADDR_LOOPBACK);
1427 ifa->ifa_prefixlen = 8;
1428 ifa->ifa_mask = inet_make_mask(8);
1429 in_dev_hold(in_dev);
1430 ifa->ifa_dev = in_dev;
1431 ifa->ifa_scope = RT_SCOPE_HOST;
1432 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1433 set_ifa_lifetime(ifa, INFINITY_LIFE_TIME,
1434 INFINITY_LIFE_TIME);
1435 ipv4_devconf_setall(in_dev);
1436 neigh_parms_data_state_setall(in_dev->arp_parms);
1437 inet_insert_ifa(ifa);
1442 case NETDEV_CHANGEADDR:
1443 if (!IN_DEV_ARP_NOTIFY(in_dev))
/* fall through to gratuitous ARP when arp_notify is set */
1446 case NETDEV_NOTIFY_PEERS:
1447 /* Send gratuitous ARP to notify of link change */
1448 inetdev_send_gratuitous_arp(dev, in_dev);
1453 case NETDEV_PRE_TYPE_CHANGE:
1454 ip_mc_unmap(in_dev);
1456 case NETDEV_POST_TYPE_CHANGE:
1457 ip_mc_remap(in_dev);
1459 case NETDEV_CHANGEMTU:
1460 if (inetdev_valid_mtu(dev->mtu))
1462 /* disable IP when MTU is not enough */
1463 case NETDEV_UNREGISTER:
1464 inetdev_destroy(in_dev);
1466 case NETDEV_CHANGENAME:
1467 /* Do not notify about label change, this event is
1468 * not interesting to applications using netlink.
1470 inetdev_changename(dev, in_dev);
1472 devinet_sysctl_unregister(in_dev);
1473 devinet_sysctl_register(in_dev);
/* Notifier block wiring inetdev_event into the netdev notifier chain. */
1480 static struct notifier_block ip_netdev_notifier = {
1481 .notifier_call = inetdev_event,
/* Worst-case payload size of one RTM_NEWADDR/DELADDR message, used to
 * size the skb before filling (must stay in sync with
 * inet_fill_ifaddr's attributes below). */
1484 static size_t inet_nlmsg_size(void)
1486 return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
1487 + nla_total_size(4) /* IFA_ADDRESS */
1488 + nla_total_size(4) /* IFA_LOCAL */
1489 + nla_total_size(4) /* IFA_BROADCAST */
1490 + nla_total_size(IFNAMSIZ) /* IFA_LABEL */
1491 + nla_total_size(4) /* IFA_FLAGS */
1492 + nla_total_size(sizeof(struct ifa_cacheinfo)); /* IFA_CACHEINFO */
/*
 * Convert a jiffies timestamp into hundredths of a second since boot,
 * the unit userspace expects in struct ifa_cacheinfo.  INITIAL_JIFFIES
 * is subtracted so the epoch is boot time rather than the jiffies bias.
 */
1495 static inline u32 cstamp_delta(unsigned long cstamp)
1497 return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
/*
 * Append an IFA_CACHEINFO attribute describing the address's creation
 * time (cstamp), last-update time (tstamp) and remaining preferred/valid
 * lifetimes.  Returns the nla_put() result (0 or -EMSGSIZE).
 */
1500 static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
1501 unsigned long tstamp, u32 preferred, u32 valid)
1503 struct ifa_cacheinfo ci;
1505 ci.cstamp = cstamp_delta(cstamp);
1506 ci.tstamp = cstamp_delta(tstamp);
1507 ci.ifa_prefered = preferred;
1508 ci.ifa_valid = valid;
1510 return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
/*
 * Fill one RTM_NEWADDR/RTM_DELADDR netlink message for @ifa into @skb.
 * Returns 0 on success; on attribute overflow the message is cancelled
 * via nlmsg_cancel() (error path partially elided in this view).
 */
1513 static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
1514 u32 portid, u32 seq, int event, unsigned int flags)
1516 struct ifaddrmsg *ifm;
1517 struct nlmsghdr *nlh;
1518 u32 preferred, valid;
1520 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags);
/* Fixed header: family, prefix, flags, scope and owning ifindex. */
1524 ifm = nlmsg_data(nlh);
1525 ifm->ifa_family = AF_INET;
1526 ifm->ifa_prefixlen = ifa->ifa_prefixlen;
1527 ifm->ifa_flags = ifa->ifa_flags;
1528 ifm->ifa_scope = ifa->ifa_scope;
1529 ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
/*
 * Non-permanent addresses report *remaining* lifetimes, measured from
 * ifa_tstamp; permanent ones report INFINITY_LIFE_TIME for both.
 */
1531 if (!(ifm->ifa_flags & IFA_F_PERMANENT)) {
1532 preferred = ifa->ifa_preferred_lft;
1533 valid = ifa->ifa_valid_lft;
1534 if (preferred != INFINITY_LIFE_TIME) {
1535 long tval = (jiffies - ifa->ifa_tstamp) / HZ;
1537 if (preferred > tval)
1541 if (valid != INFINITY_LIFE_TIME) {
1549 preferred = INFINITY_LIFE_TIME;
1550 valid = INFINITY_LIFE_TIME;
/* Optional attributes: only emitted when the field is non-empty. */
1552 if ((ifa->ifa_address &&
1553 nla_put_in_addr(skb, IFA_ADDRESS, ifa->ifa_address)) ||
1555 nla_put_in_addr(skb, IFA_LOCAL, ifa->ifa_local)) ||
1556 (ifa->ifa_broadcast &&
1557 nla_put_in_addr(skb, IFA_BROADCAST, ifa->ifa_broadcast)) ||
1558 (ifa->ifa_label[0] &&
1559 nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) ||
1560 nla_put_u32(skb, IFA_FLAGS, ifa->ifa_flags) ||
1561 put_cacheinfo(skb, ifa->ifa_cstamp, ifa->ifa_tstamp,
1563 goto nla_put_failure;
1565 nlmsg_end(skb, nlh);
1569 nlmsg_cancel(skb, nlh);
/*
 * RTM_GETADDR dump callback: walk every device hash bucket and every
 * address on each in_device, emitting one RTM_NEWADDR per address.
 * cb->args[0..2] carry the resume position (bucket, device idx, addr
 * idx) across multiple dump invocations.
 */
1573 static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
1575 struct net *net = sock_net(skb->sk);
1578 int ip_idx, s_ip_idx;
1579 struct net_device *dev;
1580 struct in_device *in_dev;
1581 struct in_ifaddr *ifa;
1582 struct hlist_head *head;
/* Restore the resume cursor saved by a previous partial dump. */
1585 s_idx = idx = cb->args[1];
1586 s_ip_idx = ip_idx = cb->args[2];
1588 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1590 head = &net->dev_index_head[h];
/* Seed cb->seq from the per-netns genid so restarts can detect
 * concurrent address changes (consistency check below). */
1592 cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
1594 hlist_for_each_entry_rcu(dev, head, index_hlist) {
1597 if (h > s_h || idx > s_idx)
1599 in_dev = __in_dev_get_rcu(dev);
1603 for (ifa = in_dev->ifa_list, ip_idx = 0; ifa;
1604 ifa = ifa->ifa_next, ip_idx++) {
1605 if (ip_idx < s_ip_idx)
1607 if (inet_fill_ifaddr(skb, ifa,
1608 NETLINK_CB(cb->skb).portid,
1610 RTM_NEWADDR, NLM_F_MULTI) < 0) {
1614 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
/* Save the address index so the next invocation resumes here. */
1625 cb->args[2] = ip_idx;
/*
 * Broadcast an address add/delete event to RTNLGRP_IPV4_IFADDR
 * listeners.  Allocation or fill failure is reported to subscribers
 * via rtnl_set_sk_err() (error path partially elided in this view).
 */
1630 static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
1633 struct sk_buff *skb;
/* Echo the requesting message's sequence number when triggered by
 * a netlink request; 0 for kernel-originated notifications. */
1634 u32 seq = nlh ? nlh->nlmsg_seq : 0;
1638 net = dev_net(ifa->ifa_dev->dev);
1639 skb = nlmsg_new(inet_nlmsg_size(), GFP_KERNEL);
1643 err = inet_fill_ifaddr(skb, ifa, portid, seq, event, 0);
1645 /* -EMSGSIZE implies BUG in inet_nlmsg_size() */
1646 WARN_ON(err == -EMSGSIZE);
1650 rtnl_notify(skb, net, portid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL);
1654 rtnl_set_sk_err(net, RTNLGRP_IPV4_IFADDR, err);
/*
 * rtnl_af_ops hook: space needed for the IFLA_INET_CONF blob (one u32
 * per devconf knob) attached to link dumps.
 */
1657 static size_t inet_get_link_af_size(const struct net_device *dev,
1658 u32 ext_filter_mask)
1660 struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
1665 return nla_total_size(IPV4_DEVCONF_MAX * 4); /* IFLA_INET_CONF */
/*
 * rtnl_af_ops hook: copy the device's entire ipv4 devconf array into an
 * IFLA_INET_CONF attribute for RTM_GETLINK replies.
 */
1668 static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
1669 u32 ext_filter_mask)
1671 struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
1678 nla = nla_reserve(skb, IFLA_INET_CONF, IPV4_DEVCONF_MAX * 4);
1682 for (i = 0; i < IPV4_DEVCONF_MAX; i++)
1683 ((u32 *) nla_data(nla))[i] = in_dev->cnf.data[i];
/* Policy for the AF_INET slot of IFLA_AF_SPEC: one nested conf blob. */
1688 static const struct nla_policy inet_af_policy[IFLA_INET_MAX+1] = {
1689 [IFLA_INET_CONF] = { .type = NLA_NESTED },
/*
 * rtnl_af_ops hook: validate an IFLA_AF_SPEC/AF_INET payload before
 * inet_set_link_af() applies it.  Rejects devices without an in_device
 * and conf entries whose id is outside [1, IPV4_DEVCONF_MAX].
 */
1692 static int inet_validate_link_af(const struct net_device *dev,
1693 const struct nlattr *nla)
1695 struct nlattr *a, *tb[IFLA_INET_MAX+1];
1698 if (dev && !__in_dev_get_rtnl(dev))
1699 return -EAFNOSUPPORT;
1701 err = nla_parse_nested(tb, IFLA_INET_MAX, nla, inet_af_policy);
1705 if (tb[IFLA_INET_CONF]) {
1706 nla_for_each_nested(a, tb[IFLA_INET_CONF], rem) {
1707 int cfgid = nla_type(a);
/* devconf ids are 1-based on the wire; 0 and out-of-range are invalid */
1712 if (cfgid <= 0 || cfgid > IPV4_DEVCONF_MAX)
/*
 * rtnl_af_ops hook: apply an already-validated IFLA_INET_CONF payload,
 * setting each listed devconf knob on the device's in_device.
 */
1720 static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla)
1722 struct in_device *in_dev = __in_dev_get_rtnl(dev);
1723 struct nlattr *a, *tb[IFLA_INET_MAX+1];
1727 return -EAFNOSUPPORT;
/* NULL policy: attributes were fully checked in inet_validate_link_af() */
1729 if (nla_parse_nested(tb, IFLA_INET_MAX, nla, NULL) < 0)
1732 if (tb[IFLA_INET_CONF]) {
1733 nla_for_each_nested(a, tb[IFLA_INET_CONF], rem)
1734 ipv4_devconf_set(in_dev, nla_type(a), nla_get_u32(a));
/*
 * Payload size for an RTM_NEWNETCONF message carrying attribute @type,
 * or all attributes when @type == -1.  Must stay in sync with
 * inet_netconf_fill_devconf().
 */
1740 static int inet_netconf_msgsize_devconf(int type)
1742 int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
1743 + nla_total_size(4); /* NETCONFA_IFINDEX */
1745 /* type -1 is used for ALL */
1746 if (type == -1 || type == NETCONFA_FORWARDING)
1747 size += nla_total_size(4);
1748 if (type == -1 || type == NETCONFA_RP_FILTER)
1749 size += nla_total_size(4);
1750 if (type == -1 || type == NETCONFA_MC_FORWARDING)
1751 size += nla_total_size(4);
1752 if (type == -1 || type == NETCONFA_PROXY_NEIGH)
1753 size += nla_total_size(4);
1754 if (type == -1 || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
1755 size += nla_total_size(4);
/*
 * Build one RTM_NEWNETCONF message for @devconf into @skb, emitting
 * either the single attribute selected by @type or all of them when
 * @type == -1.  On overflow the message is cancelled via nlmsg_cancel().
 */
1760 static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
1761 struct ipv4_devconf *devconf, u32 portid,
1762 u32 seq, int event, unsigned int flags,
1765 struct nlmsghdr *nlh;
1766 struct netconfmsg *ncm;
1768 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
1773 ncm = nlmsg_data(nlh);
1774 ncm->ncm_family = AF_INET;
1776 if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
1777 goto nla_put_failure;
1779 /* type -1 is used for ALL */
1780 if ((type == -1 || type == NETCONFA_FORWARDING) &&
1781 nla_put_s32(skb, NETCONFA_FORWARDING,
1782 IPV4_DEVCONF(*devconf, FORWARDING)) < 0)
1783 goto nla_put_failure;
1784 if ((type == -1 || type == NETCONFA_RP_FILTER) &&
1785 nla_put_s32(skb, NETCONFA_RP_FILTER,
1786 IPV4_DEVCONF(*devconf, RP_FILTER)) < 0)
1787 goto nla_put_failure;
1788 if ((type == -1 || type == NETCONFA_MC_FORWARDING) &&
1789 nla_put_s32(skb, NETCONFA_MC_FORWARDING,
1790 IPV4_DEVCONF(*devconf, MC_FORWARDING)) < 0)
1791 goto nla_put_failure;
/* note: NETCONFA_PROXY_NEIGH is backed by the PROXY_ARP devconf knob */
1792 if ((type == -1 || type == NETCONFA_PROXY_NEIGH) &&
1793 nla_put_s32(skb, NETCONFA_PROXY_NEIGH,
1794 IPV4_DEVCONF(*devconf, PROXY_ARP)) < 0)
1795 goto nla_put_failure;
1796 if ((type == -1 || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
1797 nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
1798 IPV4_DEVCONF(*devconf, IGNORE_ROUTES_WITH_LINKDOWN)) < 0)
1799 goto nla_put_failure;
1801 nlmsg_end(skb, nlh);
1805 nlmsg_cancel(skb, nlh);
/*
 * Broadcast a netconf change (attribute @type for @ifindex) to
 * RTNLGRP_IPV4_NETCONF listeners.  Uses GFP_ATOMIC since callers may
 * hold spinlocks; failures are reported via rtnl_set_sk_err().
 */
1809 void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
1810 struct ipv4_devconf *devconf)
1812 struct sk_buff *skb;
1815 skb = nlmsg_new(inet_netconf_msgsize_devconf(type), GFP_ATOMIC);
1819 err = inet_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
1820 RTM_NEWNETCONF, 0, type);
1822 /* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
1823 WARN_ON(err == -EMSGSIZE);
1827 rtnl_notify(skb, net, 0, RTNLGRP_IPV4_NETCONF, NULL, GFP_ATOMIC);
1831 rtnl_set_sk_err(net, RTNLGRP_IPV4_NETCONF, err);
/* Attribute policy for RTM_GETNETCONF requests: plain ints only. */
1834 static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = {
1835 [NETCONFA_IFINDEX] = { .len = sizeof(int) },
1836 [NETCONFA_FORWARDING] = { .len = sizeof(int) },
1837 [NETCONFA_RP_FILTER] = { .len = sizeof(int) },
1838 [NETCONFA_PROXY_NEIGH] = { .len = sizeof(int) },
1839 [NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN] = { .len = sizeof(int) },
/*
 * RTM_GETNETCONF handler: look up the requested devconf — "all",
 * "default", or a specific device by ifindex — and unicast a full
 * RTM_NEWNETCONF reply (type -1 == all attributes) to the requester.
 */
1842 static int inet_netconf_get_devconf(struct sk_buff *in_skb,
1843 struct nlmsghdr *nlh)
1845 struct net *net = sock_net(in_skb->sk);
1846 struct nlattr *tb[NETCONFA_MAX+1];
1847 struct netconfmsg *ncm;
1848 struct sk_buff *skb;
1849 struct ipv4_devconf *devconf;
1850 struct in_device *in_dev;
1851 struct net_device *dev;
1855 err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX,
1856 devconf_ipv4_policy);
1861 if (!tb[NETCONFA_IFINDEX])
/* Select the devconf: the two pseudo-ifindexes map to the per-netns
 * "all"/"default" tables, anything else to a real device. */
1864 ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
1866 case NETCONFA_IFINDEX_ALL:
1867 devconf = net->ipv4.devconf_all;
1869 case NETCONFA_IFINDEX_DEFAULT:
1870 devconf = net->ipv4.devconf_dflt;
1873 dev = __dev_get_by_index(net, ifindex);
1876 in_dev = __in_dev_get_rtnl(dev);
1879 devconf = &in_dev->cnf;
1884 skb = nlmsg_new(inet_netconf_msgsize_devconf(-1), GFP_ATOMIC);
1888 err = inet_netconf_fill_devconf(skb, ifindex, devconf,
1889 NETLINK_CB(in_skb).portid,
1890 nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
1893 /* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
1894 WARN_ON(err == -EMSGSIZE);
1898 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
/*
 * RTM_GETNETCONF dump callback: emit one RTM_NEWNETCONF per device,
 * then (using bucket indexes NETDEV_HASHENTRIES and +1 as virtual
 * positions) one message each for the "all" and "default" devconfs.
 * cb->args[] carries the resume cursor across invocations.
 */
1903 static int inet_netconf_dump_devconf(struct sk_buff *skb,
1904 struct netlink_callback *cb)
1906 struct net *net = sock_net(skb->sk);
1909 struct net_device *dev;
1910 struct in_device *in_dev;
1911 struct hlist_head *head;
1914 s_idx = idx = cb->args[1];
1916 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1918 head = &net->dev_index_head[h];
/* Seed cb->seq from the per-netns genid for consistency checking. */
1920 cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
1922 hlist_for_each_entry_rcu(dev, head, index_hlist) {
1925 in_dev = __in_dev_get_rcu(dev);
1929 if (inet_netconf_fill_devconf(skb, dev->ifindex,
1931 NETLINK_CB(cb->skb).portid,
1939 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
/* After all real devices: the two pseudo entries ("all", "default"). */
1945 if (h == NETDEV_HASHENTRIES) {
1946 if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
1947 net->ipv4.devconf_all,
1948 NETLINK_CB(cb->skb).portid,
1950 RTM_NEWNETCONF, NLM_F_MULTI,
1956 if (h == NETDEV_HASHENTRIES + 1) {
1957 if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
1958 net->ipv4.devconf_dflt,
1959 NETLINK_CB(cb->skb).portid,
1961 RTM_NEWNETCONF, NLM_F_MULTI,
1974 #ifdef CONFIG_SYSCTL
/*
 * Propagate a change of default devconf knob @i to every device whose
 * per-device value was never explicitly set (tracked via cnf.state bits
 * set in devinet_conf_proc()).
 */
1976 static void devinet_copy_dflt_conf(struct net *net, int i)
1978 struct net_device *dev;
1981 for_each_netdev_rcu(net, dev) {
1982 struct in_device *in_dev;
1984 in_dev = __in_dev_get_rcu(dev);
1985 if (in_dev && !test_bit(i, in_dev->cnf.state))
1986 in_dev->cnf.data[i] = net->ipv4.devconf_dflt->data[i];
1991 /* called with RTNL locked */
/*
 * Apply a change of the "all" forwarding sysctl: flip the global
 * accept_redirects default to the opposite of forwarding, copy the new
 * forwarding value to the default table and to every device (disabling
 * LRO, which interferes with forwarding), and emit a netconf
 * notification for each scope touched.
 */
1992 static void inet_forward_change(struct net *net)
1994 struct net_device *dev;
1995 int on = IPV4_DEVCONF_ALL(net, FORWARDING);
1997 IPV4_DEVCONF_ALL(net, ACCEPT_REDIRECTS) = !on;
1998 IPV4_DEVCONF_DFLT(net, FORWARDING) = on;
1999 inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
2000 NETCONFA_IFINDEX_ALL,
2001 net->ipv4.devconf_all);
2002 inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
2003 NETCONFA_IFINDEX_DEFAULT,
2004 net->ipv4.devconf_dflt);
2006 for_each_netdev(net, dev) {
2007 struct in_device *in_dev;
2009 dev_disable_lro(dev);
2011 in_dev = __in_dev_get_rcu(dev);
2013 IN_DEV_CONF_SET(in_dev, FORWARDING, on);
2014 inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
2015 dev->ifindex, &in_dev->cnf);
/*
 * Map a devconf table back to the ifindex used in netconf messages:
 * the per-netns default/all tables get their pseudo-ifindexes, any
 * other table is embedded in an in_device whose ifindex is returned.
 */
2021 static int devinet_conf_ifindex(struct net *net, struct ipv4_devconf *cnf)
2023 if (cnf == net->ipv4.devconf_dflt)
2024 return NETCONFA_IFINDEX_DEFAULT;
2025 else if (cnf == net->ipv4.devconf_all)
2026 return NETCONFA_IFINDEX_ALL;
2028 struct in_device *idev
2029 = container_of(cnf, struct in_device, cnf);
2030 return idev->dev->ifindex;
/*
 * Generic proc handler for devconf sysctls.  On a write it marks the
 * knob as explicitly set (so default propagation skips it), mirrors
 * default-table writes to unset devices, flushes the route cache for
 * knobs that affect input validation, and emits netconf notifications
 * for knobs userspace can subscribe to.
 */
2034 static int devinet_conf_proc(struct ctl_table *ctl, int write,
2035 void __user *buffer,
2036 size_t *lenp, loff_t *ppos)
2038 int old_value = *(int *)ctl->data;
2039 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2040 int new_value = *(int *)ctl->data;
2043 struct ipv4_devconf *cnf = ctl->extra1;
2044 struct net *net = ctl->extra2;
/* Index of this knob within the devconf data[] array. */
2045 int i = (int *)ctl->data - cnf->data;
2048 set_bit(i, cnf->state);
2050 if (cnf == net->ipv4.devconf_dflt)
2051 devinet_copy_dflt_conf(net, i);
/* Turning these off invalidates cached routing decisions. */
2052 if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1 ||
2053 i == IPV4_DEVCONF_ROUTE_LOCALNET - 1)
2054 if ((new_value == 0) && (old_value != 0))
2055 rt_cache_flush(net);
2057 if (i == IPV4_DEVCONF_RP_FILTER - 1 &&
2058 new_value != old_value) {
2059 ifindex = devinet_conf_ifindex(net, cnf);
2060 inet_netconf_notify_devconf(net, NETCONFA_RP_FILTER,
2063 if (i == IPV4_DEVCONF_PROXY_ARP - 1 &&
2064 new_value != old_value) {
2065 ifindex = devinet_conf_ifindex(net, cnf);
2066 inet_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH,
2069 if (i == IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN - 1 &&
2070 new_value != old_value) {
2071 ifindex = devinet_conf_ifindex(net, cnf);
2072 inet_netconf_notify_devconf(net, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
/*
 * Proc handler for "forwarding" sysctls.  Writes other than to the
 * default table must run under RTNL: if the trylock fails the syscall
 * is restarted.  A change of the "all" knob fans out through
 * inet_forward_change(); a per-device change disables LRO and notifies
 * just that device, then the route cache is flushed.
 */
2080 static int devinet_sysctl_forward(struct ctl_table *ctl, int write,
2081 void __user *buffer,
2082 size_t *lenp, loff_t *ppos)
2084 int *valp = ctl->data;
2087 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2089 if (write && *valp != val) {
2090 struct net *net = ctl->extra2;
2092 if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) {
2093 if (!rtnl_trylock()) {
2094 /* Restore the original values before restarting */
2097 return restart_syscall();
2099 if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) {
2100 inet_forward_change(net);
2102 struct ipv4_devconf *cnf = ctl->extra1;
2103 struct in_device *idev =
2104 container_of(cnf, struct in_device, cnf);
2106 dev_disable_lro(idev->dev);
2107 inet_netconf_notify_devconf(net,
2108 NETCONFA_FORWARDING,
2113 rt_cache_flush(net);
/* Default-table change: notify only, no RTNL needed. */
2115 inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
2116 NETCONFA_IFINDEX_DEFAULT,
2117 net->ipv4.devconf_dflt);
/*
 * Proc handler for knobs whose change invalidates cached routes
 * (disable_xfrm, disable_policy, ...): plain int write plus a route
 * cache flush whenever the value actually changed.
 */
2123 static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
2124 void __user *buffer,
2125 size_t *lenp, loff_t *ppos)
2127 int *valp = ctl->data;
2129 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2130 struct net *net = ctl->extra2;
2132 if (write && *valp != val)
2133 rt_cache_flush(net);
/*
 * Builders for devinet_sysctl.devinet_vars[] entries.  The base macro
 * points .data into the template ipv4_devconf (offsets are rebased per
 * net/device in __devinet_sysctl_register()); the wrappers pick the
 * file mode and proc handler.
 */
2138 #define DEVINET_SYSCTL_ENTRY(attr, name, mval, proc) \
2141 .data = ipv4_devconf.data + \
2142 IPV4_DEVCONF_ ## attr - 1, \
2143 .maxlen = sizeof(int), \
2145 .proc_handler = proc, \
2146 .extra1 = &ipv4_devconf, \
2149 #define DEVINET_SYSCTL_RW_ENTRY(attr, name) \
2150 DEVINET_SYSCTL_ENTRY(attr, name, 0644, devinet_conf_proc)
/* Read-only entry: same handler, mode 0444. */
2152 #define DEVINET_SYSCTL_RO_ENTRY(attr, name) \
2153 DEVINET_SYSCTL_ENTRY(attr, name, 0444, devinet_conf_proc)
/* Entry with a custom proc handler (e.g. devinet_sysctl_forward). */
2155 #define DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, proc) \
2156 DEVINET_SYSCTL_ENTRY(attr, name, 0644, proc)
/* Entry whose writes flush the route cache via ipv4_doint_and_flush. */
2158 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
2159 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
/*
 * Template sysctl table for net/ipv4/conf/<dev>/ — duplicated and
 * rebased per device/netns by __devinet_sysctl_register().
 */
2161 static struct devinet_sysctl_table {
2162 struct ctl_table_header *sysctl_header;
2163 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
2164 } devinet_sysctl = {
2166 DEVINET_SYSCTL_COMPLEX_ENTRY(FORWARDING, "forwarding",
2167 devinet_sysctl_forward),
2168 DEVINET_SYSCTL_RO_ENTRY(MC_FORWARDING, "mc_forwarding"),
2170 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_REDIRECTS, "accept_redirects"),
2171 DEVINET_SYSCTL_RW_ENTRY(SECURE_REDIRECTS, "secure_redirects"),
2172 DEVINET_SYSCTL_RW_ENTRY(SHARED_MEDIA, "shared_media"),
2173 DEVINET_SYSCTL_RW_ENTRY(RP_FILTER, "rp_filter"),
2174 DEVINET_SYSCTL_RW_ENTRY(SEND_REDIRECTS, "send_redirects"),
2175 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE,
2176 "accept_source_route"),
2177 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_LOCAL, "accept_local"),
2178 DEVINET_SYSCTL_RW_ENTRY(SRC_VMARK, "src_valid_mark"),
2179 DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"),
2180 DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"),
2181 DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"),
2182 DEVINET_SYSCTL_RW_ENTRY(LOG_MARTIANS, "log_martians"),
2183 DEVINET_SYSCTL_RW_ENTRY(TAG, "tag"),
2184 DEVINET_SYSCTL_RW_ENTRY(ARPFILTER, "arp_filter"),
2185 DEVINET_SYSCTL_RW_ENTRY(ARP_ANNOUNCE, "arp_announce"),
2186 DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"),
2187 DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"),
2188 DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"),
2189 DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN, "proxy_arp_pvlan"),
2190 DEVINET_SYSCTL_RW_ENTRY(FORCE_IGMP_VERSION,
2191 "force_igmp_version"),
2192 DEVINET_SYSCTL_RW_ENTRY(IGMPV2_UNSOLICITED_REPORT_INTERVAL,
2193 "igmpv2_unsolicited_report_interval"),
2194 DEVINET_SYSCTL_RW_ENTRY(IGMPV3_UNSOLICITED_REPORT_INTERVAL,
2195 "igmpv3_unsolicited_report_interval"),
2196 DEVINET_SYSCTL_RW_ENTRY(IGNORE_ROUTES_WITH_LINKDOWN,
2197 "ignore_routes_with_linkdown"),
2199 DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"),
2200 DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"),
2201 DEVINET_SYSCTL_FLUSHING_ENTRY(PROMOTE_SECONDARIES,
2202 "promote_secondaries"),
2203 DEVINET_SYSCTL_FLUSHING_ENTRY(ROUTE_LOCALNET,
/*
 * Clone the devinet_sysctl template, rebase each entry's .data from the
 * global ipv4_devconf onto @p, and register the table under
 * net/ipv4/conf/@dev_name for @net.  Error unwinding is partially
 * elided in this view.
 */
2208 static int __devinet_sysctl_register(struct net *net, char *dev_name,
2209 struct ipv4_devconf *p)
2212 struct devinet_sysctl_table *t;
2213 char path[sizeof("net/ipv4/conf/") + IFNAMSIZ];
2215 t = kmemdup(&devinet_sysctl, sizeof(*t), GFP_KERNEL);
/* -1: the last devinet_vars[] slot stays the all-zero terminator. */
2219 for (i = 0; i < ARRAY_SIZE(t->devinet_vars) - 1; i++) {
2220 t->devinet_vars[i].data += (char *)p - (char *)&ipv4_devconf;
2221 t->devinet_vars[i].extra1 = p;
2222 t->devinet_vars[i].extra2 = net;
2225 snprintf(path, sizeof(path), "net/ipv4/conf/%s", dev_name);
2227 t->sysctl_header = register_net_sysctl(net, path, t->devinet_vars);
2228 if (!t->sysctl_header)
/* Tear down the sysctl table registered by __devinet_sysctl_register(). */
2240 static void __devinet_sysctl_unregister(struct ipv4_devconf *cnf)
2242 struct devinet_sysctl_table *t = cnf->sysctl;
2248 unregister_net_sysctl_table(t->sysctl_header);
/*
 * Register both neighbour (ARP) and devconf sysctls for @idev; on
 * devconf failure the neighbour registration is rolled back.
 */
2252 static int devinet_sysctl_register(struct in_device *idev)
2256 if (!sysctl_dev_name_is_allowed(idev->dev->name))
2259 err = neigh_sysctl_register(idev->dev, idev->arp_parms, NULL);
2262 err = __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name,
2265 neigh_sysctl_unregister(idev->arp_parms);
/* Reverse of devinet_sysctl_register(): drop devconf then ARP sysctls. */
2269 static void devinet_sysctl_unregister(struct in_device *idev)
2271 __devinet_sysctl_unregister(&idev->cnf);
2272 neigh_sysctl_unregister(idev->arp_parms);
/*
 * Template for the top-level net/ipv4/ip_forward sysctl; .data/.extra*
 * are rebased per netns in devinet_init_net().
 */
2275 static struct ctl_table ctl_forward_entry[] = {
2277 .procname = "ip_forward",
2278 .data = &ipv4_devconf.data[
2279 IPV4_DEVCONF_FORWARDING - 1],
2280 .maxlen = sizeof(int),
2282 .proc_handler = devinet_sysctl_forward,
2283 .extra1 = &ipv4_devconf,
2284 .extra2 = &init_net,
/*
 * Per-netns init: init_net uses the static devconf templates directly;
 * other namespaces get kmemdup'd copies of "all"/"default" (and, under
 * CONFIG_SYSCTL, of the ip_forward table rebased onto them).  Then the
 * "all"/"default" conf sysctls and net/ipv4/ip_forward are registered.
 * Error unwinding frees only what was duplicated.
 */
2290 static __net_init int devinet_init_net(struct net *net)
2293 struct ipv4_devconf *all, *dflt;
2294 #ifdef CONFIG_SYSCTL
2295 struct ctl_table *tbl = ctl_forward_entry;
2296 struct ctl_table_header *forw_hdr;
2300 all = &ipv4_devconf;
2301 dflt = &ipv4_devconf_dflt;
2303 if (!net_eq(net, &init_net)) {
2304 all = kmemdup(all, sizeof(ipv4_devconf), GFP_KERNEL);
2308 dflt = kmemdup(dflt, sizeof(ipv4_devconf_dflt), GFP_KERNEL);
2310 goto err_alloc_dflt;
2312 #ifdef CONFIG_SYSCTL
2313 tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
/* Point the duplicated ip_forward entry at this netns's "all" conf. */
2317 tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1];
2318 tbl[0].extra1 = all;
2319 tbl[0].extra2 = net;
2323 #ifdef CONFIG_SYSCTL
2324 err = __devinet_sysctl_register(net, "all", all);
2328 err = __devinet_sysctl_register(net, "default", dflt);
2333 forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
2336 net->ipv4.forw_hdr = forw_hdr;
2339 net->ipv4.devconf_all = all;
2340 net->ipv4.devconf_dflt = dflt;
2343 #ifdef CONFIG_SYSCTL
/* Unwind in reverse order; free only non-template allocations. */
2345 __devinet_sysctl_unregister(dflt);
2347 __devinet_sysctl_unregister(all);
2349 if (tbl != ctl_forward_entry)
2353 if (dflt != &ipv4_devconf_dflt)
2356 if (all != &ipv4_devconf)
/*
 * Per-netns teardown: unregister the ip_forward and conf sysctls, then
 * free the devconf copies allocated in devinet_init_net().
 */
2362 static __net_exit void devinet_exit_net(struct net *net)
2364 #ifdef CONFIG_SYSCTL
2365 struct ctl_table *tbl;
2367 tbl = net->ipv4.forw_hdr->ctl_table_arg;
2368 unregister_net_sysctl_table(net->ipv4.forw_hdr);
2369 __devinet_sysctl_unregister(net->ipv4.devconf_dflt);
2370 __devinet_sysctl_unregister(net->ipv4.devconf_all);
2373 kfree(net->ipv4.devconf_dflt);
2374 kfree(net->ipv4.devconf_all);
/* Pernet hooks: set up/tear down per-netns devconf state and sysctls. */
2377 static __net_initdata struct pernet_operations devinet_ops = {
2378 .init = devinet_init_net,
2379 .exit = devinet_exit_net,
/* AF_INET hooks for rtnetlink link messages (IFLA_AF_SPEC handling). */
2382 static struct rtnl_af_ops inet_af_ops __read_mostly = {
2384 .fill_link_af = inet_fill_link_af,
2385 .get_link_af_size = inet_get_link_af_size,
2386 .validate_link_af = inet_validate_link_af,
2387 .set_link_af = inet_set_link_af,
2390 void __init devinet_init(void)
2394 for (i = 0; i < IN4_ADDR_HSIZE; i++)
2395 INIT_HLIST_HEAD(&inet_addr_lst[i]);
2397 register_pernet_subsys(&devinet_ops);
2399 register_gifconf(PF_INET, inet_gifconf);
2400 register_netdevice_notifier(&ip_netdev_notifier);
2402 queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);
2404 rtnl_af_register(&inet_af_ops);
2406 rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, NULL);
2407 rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, NULL);
2408 rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, NULL);
2409 rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf,
2410 inet_netconf_dump_devconf, NULL);