/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */
#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>

#include "net-sysfs.h"
/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

seqcount_t devnet_rename_seq;
static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}
static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return &ptype_all;
	else
		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
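
/* Usage sketch (illustrative, not part of this file): a module that taps
 * every packet, in the style of af_packet.c. The names example_rcv and
 * example_ptype are hypothetical.
 *
 *	static int example_rcv(struct sk_buff *skb, struct net_device *dev,
 *			       struct packet_type *pt,
 *			       struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type example_ptype __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_ALL),
 *		.func = example_rcv,
 *	};
 *
 * dev_add_pack(&example_ptype) would then be called from module init and
 * dev_remove_pack(&example_ptype) from module exit; the handler must free
 * or consume the skb clone handed to it.
 */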
/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;

	spin_lock(&offload_lock);
	list_add_rcu(&po->list, head);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *	__dev_remove_offload	 - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(__dev_remove_offload);

/**
 *	dev_remove_offload	 - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
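
/* Usage sketch (illustrative, not part of this file): IPv4 wires up its
 * GSO/GRO callbacks roughly this way at boot (compare net/ipv4/af_inet.c
 * on this kernel generation):
 *
 *	static struct packet_offload ip_packet_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.callbacks = {
 *			.gso_send_check = inet_gso_send_check,
 *			.gso_segment = inet_gso_segment,
 *			.gro_receive = inet_gro_receive,
 *			.gro_complete = inet_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&ip_packet_offload);
 */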
/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);
/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
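
/* Usage sketch (illustrative, not part of this file): a refcounted lookup
 * from process context; dev_put() must balance the implicit dev_hold().
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *	if (dev) {
 *		pr_info("%s has ifindex %d\n", dev->name, dev->ifindex);
 *		dev_put(dev);
 *	}
 */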
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);
/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
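
/* Usage sketch (illustrative, not part of this file): a lockless lookup.
 * The returned pointer is only valid inside the RCU read-side critical
 * section and no reference is taken.
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_index_rcu(net, ifindex);
 *	if (dev)
 *		netdev_info(dev, "found by index\n");
 *	rcu_read_unlock();
 */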
/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 *
 *	The use of raw_seqcount_begin() and cond_resched() before
 *	retrying is required as we want to give the writers a chance
 *	to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		cond_resched();
		goto retry;
	}

	return 0;
}

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags_rcu - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rcu_read_lock(), and result refcount is unchanged.
 */

struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
					unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);
/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}
/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
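
/* Usage sketch (illustrative, not part of this file): drivers usually pass
 * a format string such as "eth%d" and let the core pick the first free
 * unit number:
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *	if (err < 0)
 *		return err;
 */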
static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

static int dev_get_valid_name(struct net *net,
			      struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	char *new_ifalias;

	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		kfree(dev->ifalias);
		dev->ifalias = NULL;
		return 0;
	}

	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!new_ifalias)
		return -ENOMEM;
	dev->ifalias = new_ifalias;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}
/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 * netdev_notify_peers - notify network peers about existence of @dev
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);
static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	ret = netpoll_rx_disable(dev);
	if (ret)
		return ret;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_rx_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		net_dmaengine_get();
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}
/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
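
/* Usage sketch (illustrative, not part of this file): bringing an
 * interface up from kernel code; the RTNL must be held around dev_open():
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	rtnl_unlock();
 */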
static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, unreg_list) {
		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_clear_bit(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		net_dmaengine_put();
	}

	return 0;
}

static int __dev_close(struct net_device *dev)
{
	int retval;
	LIST_HEAD(single);

	/* Temporarily disable netpoll until the interface is down */
	retval = netpoll_rx_disable(dev);
	if (retval)
		return retval;

	list_add(&dev->unreg_list, &single);
	retval = __dev_close_many(&single);
	list_del(&single);

	netpoll_rx_enable(dev);
	return retval;
}

static int dev_close_many(struct list_head *head)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(dev, tmp, head, unreg_list)
		if (!(dev->flags & IFF_UP))
			list_move(&dev->unreg_list, &tmp_list);

	__dev_close_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	/* rollback_registered_many needs the complete original list */
	list_splice(&tmp_list, head);
	return 0;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	int ret = 0;

	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		/* Block netpoll rx while the interface is going down */
		ret = netpoll_rx_disable(dev);
		if (ret)
			return ret;

		list_add(&dev->unreg_list, &single);
		dev_close_many(&single);
		list_del(&single);

		netpoll_rx_enable(dev);
	}
	return ret;
}
EXPORT_SYMBOL(dev_close);
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	/*
	 * If we're trying to disable lro on a vlan device
	 * use the underlying physical device instead
	 */
	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");
}
EXPORT_SYMBOL(dev_disable_lro);
static int dev_boot_phase = 1;

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
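
/* Usage sketch (illustrative, not part of this file): a minimal notifier.
 * The names example_event and example_nb are hypothetical; note that on
 * this kernel the notifier's last argument is the net_device itself.
 *
 *	static int example_event(struct notifier_block *nb,
 *				 unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			netdev_info(dev, "is up\n");
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_event,
 *	};
 *
 * register_netdevice_notifier(&example_nb) then replays NETDEV_REGISTER
 * and NETDEV_UP for already-present devices, as described above.
 */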
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 *
 *	After unregistering unregister and down device events are synthesized
 *	for all devices on the device list to the removed notifier to remove
 *	the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}
EXPORT_SYMBOL(call_netdevice_notifiers);
static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
/* We are not allowed to call static_key_slow_dec() from irq context
 * If net_disable_timestamp() is called from irq context, defer the
 * static_key_slow_dec() calls.
 */
static atomic_t netstamp_needed_deferred;
#endif

void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);

	if (deferred) {
		while (--deferred)
			static_key_slow_dec(&netstamp_needed);
		return;
	}
#endif
	static_key_slow_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	if (in_interrupt()) {
		atomic_inc(&netstamp_needed_deferred);
		return;
	}
#endif
	static_key_slow_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp.tv64 = 0;
	if (static_key_false(&netstamp_needed))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)				\
	if (static_key_false(&netstamp_needed)) {		\
		if ((COND) && !(SKB)->tstamp.tv64)		\
			__net_timestamp(SKB);			\
	}

static inline bool is_skb_forwardable(struct net_device *dev,
				      struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented before
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}
/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
			atomic_long_inc(&dev->rx_dropped);
			kfree_skb(skb);
			return NET_RX_DROP;
		}
	}

	if (unlikely(!is_skb_forwardable(dev, skb))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	skb->skb_iif = 0;
	skb->dev = dev;
	skb_dst_drop(skb);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);
	skb->mark = 0;
	secpath_reset(skb);
	nf_reset(skb);
	nf_reset_trace(skb);
	return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
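
/* Usage sketch (illustrative, not part of this file): a veth-style pair
 * injecting transmitted frames into the peer's receive path (compare
 * drivers/net/veth.c). example_get_peer() is hypothetical.
 *
 *	static netdev_tx_t example_xmit(struct sk_buff *skb,
 *					struct net_device *dev)
 *	{
 *		struct net_device *peer = example_get_peer(dev);
 *
 *		if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
 *			dev->stats.tx_dropped++;
 *		return NETDEV_TX_OK;
 *	}
 */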
static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
		return -ENOMEM;
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
	if (!ptype->af_packet_priv || !skb->sk)
		return false;

	if (ptype->id_match)
		return ptype->id_match(ptype, skb->sk);
	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
		return true;

	return false;
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (!skb_loop_sk(ptype, skb))) {
			if (pt_prev) {
				deliver_skb(skb2, pt_prev, skb->dev);
				pt_prev = ptype;
				continue;
			}

			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			net_timestamp_set(skb2);

			/* skb->nh should be correctly
			 * set by sender, so that the second statement is
			 * just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
						     ntohs(skb2->protocol),
						     dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			pt_prev = ptype;
		}
	}
	if (pt_prev)
		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
	rcu_read_unlock();
}
/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and if
 * not NULL the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case TC0
 * is invalid nothing can be done so disable priority mappings. It is
 * expected that drivers will fix this mapping if they can before
 * calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}
#ifdef CONFIG_XPS
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))

static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
					int cpu, u16 index)
{
	struct xps_map *map = NULL;
	int pos;

	if (dev_maps)
		map = xmap_dereference(dev_maps->cpu_map[cpu]);

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] == index) {
			if (map->len > 1) {
				map->queues[pos] = map->queues[--map->len];
			} else {
				RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
				kfree_rcu(map, rcu);
				map = NULL;
			}
			break;
		}
	}

	return map;
}

static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{
	struct xps_dev_maps *dev_maps;
	int cpu, i;
	bool active = false;

	mutex_lock(&xps_map_mutex);
	dev_maps = xmap_dereference(dev->xps_maps);

	if (!dev_maps)
		goto out_no_maps;

	for_each_possible_cpu(cpu) {
		for (i = index; i < dev->num_tx_queues; i++) {
			if (!remove_xps_queue(dev_maps, cpu, i))
				break;
		}
		if (i == dev->num_tx_queues)
			active = true;
	}

	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

	for (i = index; i < dev->num_tx_queues; i++)
		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
					     NUMA_NO_NODE);

out_no_maps:
	mutex_unlock(&xps_map_mutex);
}

static struct xps_map *expand_xps_map(struct xps_map *map,
				      int cpu, u16 index)
{
	struct xps_map *new_map;
	int alloc_len = XPS_MIN_MAP_ALLOC;
	int i, pos;

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] != index)
			continue;
		return map;
	}

	/* Need to add queue to this CPU's existing map */
	if (map) {
		if (pos < map->alloc_len)
			return map;

		alloc_len = map->alloc_len * 2;
	}

	/* Need to allocate new map to store queue on this CPU's map */
	new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
			       cpu_to_node(cpu));
	if (!new_map)
		return NULL;

	for (i = 0; i < pos; i++)
		new_map->queues[i] = map->queues[i];
	new_map->alloc_len = alloc_len;
	new_map->len = pos;

	return new_map;
}
int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, u16 index)
{
	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
	struct xps_map *map, *new_map;
	int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
	int cpu, numa_node_id = -2;
	bool active = false;

	mutex_lock(&xps_map_mutex);

	dev_maps = xmap_dereference(dev->xps_maps);

	/* allocate memory for queue storage */
	for_each_online_cpu(cpu) {
		if (!cpumask_test_cpu(cpu, mask))
			continue;

		if (!new_dev_maps)
			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
		if (!new_dev_maps) {
			mutex_unlock(&xps_map_mutex);
			return -ENOMEM;
		}

		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
				 NULL;

		map = expand_xps_map(map, cpu, index);
		if (!map)
			goto error;

		RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
	}

	if (!new_dev_maps)
		goto out_no_new_maps;

	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
			/* add queue to CPU maps */
			int pos = 0;

			map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
			while ((pos < map->len) && (map->queues[pos] != index))
				pos++;

			if (pos == map->len)
				map->queues[map->len++] = index;
#ifdef CONFIG_NUMA
			if (numa_node_id == -2)
				numa_node_id = cpu_to_node(cpu);
			else if (numa_node_id != cpu_to_node(cpu))
				numa_node_id = -1;
#endif
		} else if (dev_maps) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[cpu]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
		}
	}

	rcu_assign_pointer(dev->xps_maps, new_dev_maps);

	/* Cleanup old maps */
	if (dev_maps) {
		for_each_possible_cpu(cpu) {
			new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
			map = xmap_dereference(dev_maps->cpu_map[cpu]);
			if (map && map != new_map)
				kfree_rcu(map, rcu);
		}

		kfree_rcu(dev_maps, rcu);
	}

	dev_maps = new_dev_maps;
	active = true;

out_no_new_maps:
	/* update Tx queue numa node */
	netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
				     (numa_node_id >= 0) ? numa_node_id :
				     NUMA_NO_NODE);

	if (!dev_maps)
		goto out_no_maps;

	/* removes queue from unused CPUs */
	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
			continue;

		if (remove_xps_queue(dev_maps, cpu, index))
			active = true;
	}

	/* free map if not active */
	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

out_no_maps:
	mutex_unlock(&xps_map_mutex);

	return 0;
error:
	/* remove any maps that we added */
	for_each_possible_cpu(cpu) {
		new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
				 NULL;
		if (new_map && new_map != map)
			kfree(new_map);
	}

	mutex_unlock(&xps_map_mutex);

	kfree(new_dev_maps);
	return -ENOMEM;
}
EXPORT_SYMBOL(netif_set_xps_queue);
#endif	/* CONFIG_XPS */
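
/* Usage sketch (illustrative, not part of this file): pinning tx queue 0
 * to CPU 0 from kernel code, equivalent to writing "1" to
 * /sys/class/net/<dev>/queues/tx-0/xps_cpus:
 *
 *	cpumask_var_t mask;
 *
 *	if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
 *		cpumask_set_cpu(0, mask);
 *		netif_set_xps_queue(dev, mask, 0);
 *		free_cpumask_var(mask);
 *	}
 */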
/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	int rc;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		if (txq < dev->real_num_tx_queues) {
			qdisc_reset_all_tx_gt(dev, txq);
#ifdef CONFIG_XPS
			netif_reset_xps_queues_gt(dev, txq);
#endif
		}
	}

	dev->real_num_tx_queues = txq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);

/**
 *	netif_set_real_num_rx_queues - set actual number of RX queues used
 *	@dev: Network device
 *	@rxq: Actual number of RX queues
 *
 *	This must be called either with the rtnl_lock held or before
 *	registration of the net device.  Returns 0 on success, or a
 *	negative error code.  If called before registration, it always
 *	succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
/**
 * netif_get_num_default_rss_queues - default number of RSS queues
 *
 * This routine should set an upper limit on the number of RSS queues
 * used by default by multiqueue devices.
 */
int netif_get_num_default_rss_queues(void)
{
	return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
}
EXPORT_SYMBOL(netif_get_num_default_rss_queues);
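
/* Usage sketch (illustrative, not part of this file): a multiqueue driver
 * capping its queue count at probe time; max_hw_queues is hypothetical.
 *
 *	num_queues = min_t(int, max_hw_queues,
 *			   netif_get_num_default_rss_queues());
 */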
static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);
/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart it if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);

static void skb_warn_bad_offload(const struct sk_buff *skb)
{
	static const netdev_features_t null_features = 0;
	struct net_device *dev = skb->dev;
	const char *driver = "";

	if (!net_ratelimit())
		return;

	if (dev && dev->dev.parent)
		driver = dev_driver_string(dev->dev.parent);

	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
	     "gso_type=%d ip_summed=%d\n",
	     driver, dev ? &dev->features : &null_features,
	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
	     skb_shinfo(skb)->gso_type, skb->ip_summed);
}
/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		skb_warn_bad_offload(skb);
		return -EINVAL;
	}

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity : checksum could be wrong.
	 */
	if (skb_has_shared_frag(skb)) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}

	offset = skb_checksum_start_offset(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);
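
/* Usage sketch (illustrative, not part of this file): a driver xmit path
 * falling back to a software checksum when its hardware cannot offload
 * this particular packet; example_hw_can_csum() is hypothetical.
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    !example_hw_can_csum(skb) &&
 *	    skb_checksum_help(skb))
 *		goto drop;
 */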
__be16 skb_network_protocol(struct sk_buff *skb)
{
	__be16 type = skb->protocol;
	int vlan_depth = ETH_HLEN;

	/* Tunnel gso handlers can set protocol to ethernet. */
	if (type == htons(ETH_P_TEB)) {
		struct ethhdr *eth;

		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
			return 0;

		eth = (struct ethhdr *)skb_mac_header(skb);
		type = eth->h_proto;
	}

	while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
		struct vlan_hdr *vh;

		if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
			return 0;

		vh = (struct vlan_hdr *)(skb->data + vlan_depth);
		type = vh->h_vlan_encapsulated_proto;
		vlan_depth += VLAN_HLEN;
	}

	return type;
}

/**
 *	skb_mac_gso_segment - mac layer segmentation handler.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 */
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	__be16 type = skb_network_protocol(skb);

	if (unlikely(!type))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, skb->mac_len);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				int err;

				err = ptype->callbacks.gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_mac_gso_segment);


/* openvswitch calls this on rx path, so we need a different check.
 */
static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
{
	if (tx_path)
		return skb->ip_summed != CHECKSUM_PARTIAL;
	else
		return skb->ip_summed == CHECKSUM_NONE;
}
2329 * __skb_gso_segment - Perform segmentation on skb.
2330 * @skb: buffer to segment
2331 * @features: features for the output path (see dev->features)
2332 * @tx_path: whether it is called in TX path
2334 * This function segments the given skb and returns a list of segments.
2336 * It may return NULL if the skb requires no segmentation. This is
2337 * only possible when GSO is used for verifying header integrity.
2339 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2340 netdev_features_t features, bool tx_path)
2342 if (unlikely(skb_needs_check(skb, tx_path))) {
2345 skb_warn_bad_offload(skb);
2347 if (skb_header_cloned(skb) &&
2348 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
2349 return ERR_PTR(err);
2352 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
2353 skb_reset_mac_header(skb);
2354 skb_reset_mac_len(skb);
2356 return skb_mac_gso_segment(skb, features);
2358 EXPORT_SYMBOL(__skb_gso_segment);
2360 /* Take action when hardware reception checksum errors are detected. */
2362 void netdev_rx_csum_fault(struct net_device *dev)
2364 if (net_ratelimit()) {
2365 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
2369 EXPORT_SYMBOL(netdev_rx_csum_fault);
2372 /* Actually, we should eliminate this check as soon as we know that:
2373  * 1. An IOMMU is present and allows all the memory to be mapped.
2374  * 2. No high memory really exists on this machine.
2377 static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb)
2379 #ifdef CONFIG_HIGHMEM
2381 if (!(dev->features & NETIF_F_HIGHDMA)) {
2382 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2383 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2384 if (PageHighMem(skb_frag_page(frag)))
2389 if (PCI_DMA_BUS_IS_PHYS) {
2390 struct device *pdev = dev->dev.parent;
2394 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2395 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2396 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
2397 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2406 void (*destructor)(struct sk_buff *skb);
2409 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
2411 static void dev_gso_skb_destructor(struct sk_buff *skb)
2413 struct dev_gso_cb *cb;
2416 struct sk_buff *nskb = skb->next;
2418 skb->next = nskb->next;
2421 } while (skb->next);
2423 cb = DEV_GSO_CB(skb);
2425 cb->destructor(skb);
2429 * dev_gso_segment - Perform emulated hardware segmentation on skb.
2430 * @skb: buffer to segment
2431 * @features: device features as applicable to this skb
2433 * This function segments the given skb and stores the list of segments
2436 static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
2438 struct sk_buff *segs;
2440 segs = skb_gso_segment(skb, features);
2442 /* Verifying header integrity only. */
2447 return PTR_ERR(segs);
2450 DEV_GSO_CB(skb)->destructor = skb->destructor;
2451 skb->destructor = dev_gso_skb_destructor;
2456 static netdev_features_t harmonize_features(struct sk_buff *skb,
2458 const struct net_device *dev,
2459 netdev_features_t features)
2461 if (skb->ip_summed != CHECKSUM_NONE &&
2462 !can_checksum_protocol(features, protocol)) {
2463 features &= ~NETIF_F_ALL_CSUM;
2464 } else if (illegal_highdma(dev, skb)) {
2465 features &= ~NETIF_F_SG;
2471 netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
2472 const struct net_device *dev)
2474 __be16 protocol = skb->protocol;
2475 netdev_features_t features = dev->features;
2477 if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs)
2478 features &= ~NETIF_F_GSO_MASK;
2480 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
2481 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
2482 protocol = veh->h_vlan_encapsulated_proto;
2483 } else if (!vlan_tx_tag_present(skb)) {
2484 return harmonize_features(skb, protocol, dev, features);
2487 features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
2488 NETIF_F_HW_VLAN_STAG_TX);
2490 if (protocol != htons(ETH_P_8021Q) && protocol != htons(ETH_P_8021AD)) {
2491 return harmonize_features(skb, protocol, dev, features);
2493 features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
2494 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
2495 NETIF_F_HW_VLAN_STAG_TX;
2496 return harmonize_features(skb, protocol, dev, features);
2499 return harmonize_features(skb, protocol, dev, features);
2501 EXPORT_SYMBOL(netif_skb_dev_features);
2504 * Returns true if either:
2505 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
2506 * 2. skb is fragmented and the device does not support SG.
2508 static inline int skb_needs_linearize(struct sk_buff *skb,
2509 netdev_features_t features)
2511 return skb_is_nonlinear(skb) &&
2512 ((skb_has_frag_list(skb) &&
2513 !(features & NETIF_F_FRAGLIST)) ||
2514 (skb_shinfo(skb)->nr_frags &&
2515 !(features & NETIF_F_SG)));
2518 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2519 struct netdev_queue *txq)
2521 const struct net_device_ops *ops = dev->netdev_ops;
2522 int rc = NETDEV_TX_OK;
2523 unsigned int skb_len;
2525 if (likely(!skb->next)) {
2526 netdev_features_t features;
2529 * If the device doesn't need skb->dst, release it right now while
2530 * it's hot in this CPU's cache
2532 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2535 features = netif_skb_features(skb);
2537 if (vlan_tx_tag_present(skb) &&
2538 !vlan_hw_offload_capable(features, skb->vlan_proto)) {
2539 skb = __vlan_put_tag(skb, skb->vlan_proto,
2540 vlan_tx_tag_get(skb));
2547 /* If this is an encapsulation offload request, verify we are
2548 * testing hardware encapsulation features instead of the
2549 * standard features for the netdev
2551 if (skb->encapsulation)
2552 features &= dev->hw_enc_features;
2554 if (netif_needs_gso(skb, features)) {
2555 if (unlikely(dev_gso_segment(skb, features)))
2560 if (skb_needs_linearize(skb, features) &&
2561 __skb_linearize(skb))
2564 /* If packet is not checksummed and device does not
2565 * support checksumming for this protocol, complete
2566 * checksumming here.
2568 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2569 if (skb->encapsulation)
2570 skb_set_inner_transport_header(skb,
2571 skb_checksum_start_offset(skb));
2573 skb_set_transport_header(skb,
2574 skb_checksum_start_offset(skb));
2575 if (!(features & NETIF_F_ALL_CSUM) &&
2576 skb_checksum_help(skb))
2581 if (!list_empty(&ptype_all))
2582 dev_queue_xmit_nit(skb, dev);
2585 rc = ops->ndo_start_xmit(skb, dev);
2586 trace_net_dev_xmit(skb, rc, dev, skb_len);
2587 if (rc == NETDEV_TX_OK)
2588 txq_trans_update(txq);
2594 struct sk_buff *nskb = skb->next;
2596 skb->next = nskb->next;
2599 if (!list_empty(&ptype_all))
2600 dev_queue_xmit_nit(nskb, dev);
2602 skb_len = nskb->len;
2603 rc = ops->ndo_start_xmit(nskb, dev);
2604 trace_net_dev_xmit(nskb, rc, dev, skb_len);
2605 if (unlikely(rc != NETDEV_TX_OK)) {
2606 if (rc & ~NETDEV_TX_MASK)
2607 goto out_kfree_gso_skb;
2608 nskb->next = skb->next;
2612 txq_trans_update(txq);
2613 if (unlikely(netif_xmit_stopped(txq) && skb->next))
2614 return NETDEV_TX_BUSY;
2615 } while (skb->next);
2618 if (likely(skb->next == NULL)) {
2619 skb->destructor = DEV_GSO_CB(skb)->destructor;
2629 static void qdisc_pkt_len_init(struct sk_buff *skb)
2631 const struct skb_shared_info *shinfo = skb_shinfo(skb);
2633 qdisc_skb_cb(skb)->pkt_len = skb->len;
2635 /* To get a more precise estimate of the bytes sent on the wire,
2636 * we add the header size of all segments to pkt_len
2638 if (shinfo->gso_size) {
2639 unsigned int hdr_len;
2640 u16 gso_segs = shinfo->gso_segs;
2642 /* mac layer + network layer */
2643 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
2645 /* + transport layer */
2646 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
2647 hdr_len += tcp_hdrlen(skb);
2649 hdr_len += sizeof(struct udphdr);
2651 if (shinfo->gso_type & SKB_GSO_DODGY)
2652 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
2655 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
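/* Worked example (illustrative numbers, not from this file): a TSO skb
 * carrying 3000 bytes of TCP payload with gso_size 1448 yields
 * gso_segs = 3 wire packets.  With hdr_len = 14 (mac) + 20 (ip) +
 * 20 (tcp) = 54 bytes, the estimate becomes
 *	pkt_len = skb->len + (3 - 1) * 54
 * so the headers of the two extra segments are accounted for as well.
 */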
2659 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2660 struct net_device *dev,
2661 struct netdev_queue *txq)
2663 spinlock_t *root_lock = qdisc_lock(q);
2667 qdisc_pkt_len_init(skb);
2668 qdisc_calculate_pkt_len(skb, q);
2670 * Heuristic to force contended enqueues to serialize on a
2671 * separate lock before trying to get the qdisc main lock.
2672 * This permits the __QDISC_STATE_RUNNING owner to get the lock more often
2673 * and dequeue packets faster.
2675 contended = qdisc_is_running(q);
2676 if (unlikely(contended))
2677 spin_lock(&q->busylock);
2679 spin_lock(root_lock);
2680 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2683 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
2684 qdisc_run_begin(q)) {
2686 * This is a work-conserving queue; there are no old skbs
2687 * waiting to be sent out; and the qdisc is not running -
2688 * xmit the skb directly.
2690 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
2693 qdisc_bstats_update(q, skb);
2695 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
2696 if (unlikely(contended)) {
2697 spin_unlock(&q->busylock);
2704 rc = NET_XMIT_SUCCESS;
2707 rc = q->enqueue(skb, q) & NET_XMIT_MASK;
2708 if (qdisc_run_begin(q)) {
2709 if (unlikely(contended)) {
2710 spin_unlock(&q->busylock);
2716 spin_unlock(root_lock);
2717 if (unlikely(contended))
2718 spin_unlock(&q->busylock);
2722 #if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
2723 static void skb_update_prio(struct sk_buff *skb)
2725 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
2727 if (!skb->priority && skb->sk && map) {
2728 unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
2730 if (prioidx < map->priomap_len)
2731 skb->priority = map->priomap[prioidx];
2735 #define skb_update_prio(skb)
2738 static DEFINE_PER_CPU(int, xmit_recursion);
2739 #define RECURSION_LIMIT 10
2742 * dev_loopback_xmit - loop back @skb
2743 * @skb: buffer to transmit
2745 int dev_loopback_xmit(struct sk_buff *skb)
2747 skb_reset_mac_header(skb);
2748 __skb_pull(skb, skb_network_offset(skb));
2749 skb->pkt_type = PACKET_LOOPBACK;
2750 skb->ip_summed = CHECKSUM_UNNECESSARY;
2751 WARN_ON(!skb_dst(skb));
2756 EXPORT_SYMBOL(dev_loopback_xmit);
2759 * dev_queue_xmit - transmit a buffer
2760 * @skb: buffer to transmit
2762 * Queue a buffer for transmission to a network device. The caller must
2763 * have set the device and priority and built the buffer before calling
2764 * this function. The function can be called from an interrupt.
2766 * A negative errno code is returned on a failure. A success does not
2767 * guarantee the frame will be transmitted as it may be dropped due
2768 * to congestion or traffic shaping.
2770 * -----------------------------------------------------------------------------------
2771 * I notice this method can also return errors from the queue disciplines,
2772 * including NET_XMIT_DROP, which is a positive value, so errors can also be positive.
2775 * Regardless of the return value, the skb is consumed, so it is currently
2776 * difficult to retry a send to this method. (You can bump the ref count
2777 * before sending to hold a reference for retry if you are careful.)
2779 * When calling this method, interrupts MUST be enabled. This is because
2780 * the BH enable code must have IRQs enabled so that it will not deadlock.
2783 int dev_queue_xmit(struct sk_buff *skb)
2785 struct net_device *dev = skb->dev;
2786 struct netdev_queue *txq;
2790 skb_reset_mac_header(skb);
2792 /* Disable soft irqs for various locks below. Also
2793 * stops preemption for RCU.
2797 skb_update_prio(skb);
2799 txq = netdev_pick_tx(dev, skb);
2800 q = rcu_dereference_bh(txq->qdisc);
2802 #ifdef CONFIG_NET_CLS_ACT
2803 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
2805 trace_net_dev_queue(skb);
2807 rc = __dev_xmit_skb(skb, q, dev, txq);
2811 /* The device has no queue. Common case for software devices:
2812 loopback, all sorts of tunnels...
2814 Really, it is unlikely that netif_tx_lock protection is necessary
2815 here. (f.e. loopback and IP tunnels are clean, ignoring statistics counters.)
2817 However, it is possible that they rely on protection made by us here.
2820 Check this and shoot the lock. It is not prone to deadlocks.
2821 Or shoot the noqueue qdisc; it is even simpler 8)
2823 if (dev->flags & IFF_UP) {
2824 int cpu = smp_processor_id(); /* ok because BHs are off */
2826 if (txq->xmit_lock_owner != cpu) {
2828 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2829 goto recursion_alert;
2831 HARD_TX_LOCK(dev, txq, cpu);
2833 if (!netif_xmit_stopped(txq)) {
2834 __this_cpu_inc(xmit_recursion);
2835 rc = dev_hard_start_xmit(skb, dev, txq);
2836 __this_cpu_dec(xmit_recursion);
2837 if (dev_xmit_complete(rc)) {
2838 HARD_TX_UNLOCK(dev, txq);
2842 HARD_TX_UNLOCK(dev, txq);
2843 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
2846 /* Recursion is detected! It is possible, unfortunately. */
2850 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
2856 rcu_read_unlock_bh();
2861 rcu_read_unlock_bh();
2864 EXPORT_SYMBOL(dev_queue_xmit);
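/* Usage sketch (hypothetical caller): a protocol that has built the
 * frame and set skb->dev and skb->priority simply queues it; the skb
 * is consumed regardless of the return value:
 *
 *	skb->dev = dev;
 *	skb->priority = sk->sk_priority;
 *	err = dev_queue_xmit(skb);
 *	if (err)	// negative errno or positive NET_XMIT_* code
 *		my_stats.tx_dropped++;	// hypothetical accounting
 */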
2867 /*=======================================================================
2868 			Receiver routines
2869 =======================================================================*/
2871 int netdev_max_backlog __read_mostly = 1000;
2872 EXPORT_SYMBOL(netdev_max_backlog);
2874 int netdev_tstamp_prequeue __read_mostly = 1;
2875 int netdev_budget __read_mostly = 300;
2876 int weight_p __read_mostly = 64; /* old backlog weight */
2878 /* Called with irq disabled */
2879 static inline void ____napi_schedule(struct softnet_data *sd,
2880 struct napi_struct *napi)
2882 list_add_tail(&napi->poll_list, &sd->poll_list);
2883 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2888 /* One global table that all flow-based protocols share. */
2889 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
2890 EXPORT_SYMBOL(rps_sock_flow_table);
2892 struct static_key rps_needed __read_mostly;
2894 static struct rps_dev_flow *
2895 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2896 struct rps_dev_flow *rflow, u16 next_cpu)
2898 if (next_cpu != RPS_NO_CPU) {
2899 #ifdef CONFIG_RFS_ACCEL
2900 struct netdev_rx_queue *rxqueue;
2901 struct rps_dev_flow_table *flow_table;
2902 struct rps_dev_flow *old_rflow;
2907 /* Should we steer this flow to a different hardware queue? */
2908 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
2909 !(dev->features & NETIF_F_NTUPLE))
2911 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
2912 if (rxq_index == skb_get_rx_queue(skb))
2915 rxqueue = dev->_rx + rxq_index;
2916 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2919 flow_id = skb->rxhash & flow_table->mask;
2920 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
2921 rxq_index, flow_id);
2925 rflow = &flow_table->flows[flow_id];
2927 if (old_rflow->filter == rflow->filter)
2928 old_rflow->filter = RPS_NO_FILTER;
2932 per_cpu(softnet_data, next_cpu).input_queue_head;
2935 rflow->cpu = next_cpu;
2940 * get_rps_cpu is called from netif_receive_skb and returns the target
2941 * CPU from the RPS map of the receiving queue for a given skb.
2942 * rcu_read_lock must be held on entry.
2944 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2945 struct rps_dev_flow **rflowp)
2947 struct netdev_rx_queue *rxqueue;
2948 struct rps_map *map;
2949 struct rps_dev_flow_table *flow_table;
2950 struct rps_sock_flow_table *sock_flow_table;
2954 if (skb_rx_queue_recorded(skb)) {
2955 u16 index = skb_get_rx_queue(skb);
2956 if (unlikely(index >= dev->real_num_rx_queues)) {
2957 WARN_ONCE(dev->real_num_rx_queues > 1,
2958 "%s received packet on queue %u, but number "
2959 "of RX queues is %u\n",
2960 dev->name, index, dev->real_num_rx_queues);
2963 rxqueue = dev->_rx + index;
2967 map = rcu_dereference(rxqueue->rps_map);
2969 if (map->len == 1 &&
2970 !rcu_access_pointer(rxqueue->rps_flow_table)) {
2971 tcpu = map->cpus[0];
2972 if (cpu_online(tcpu))
2976 } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
2980 skb_reset_network_header(skb);
2981 if (!skb_get_rxhash(skb))
2984 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2985 sock_flow_table = rcu_dereference(rps_sock_flow_table);
2986 if (flow_table && sock_flow_table) {
2988 struct rps_dev_flow *rflow;
2990 rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
2993 next_cpu = sock_flow_table->ents[skb->rxhash &
2994 sock_flow_table->mask];
2997 * If the desired CPU (where last recvmsg was done) is
2998 * different from current CPU (one in the rx-queue flow
2999 * table entry), switch if one of the following holds:
3000 * - Current CPU is unset (equal to RPS_NO_CPU).
3001 * - Current CPU is offline.
3002 * - The current CPU's queue tail has advanced beyond the
3003 * last packet that was enqueued using this table entry.
3004 * This guarantees that all previous packets for the flow
3005 * have been dequeued, thus preserving in order delivery.
3007 if (unlikely(tcpu != next_cpu) &&
3008 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
3009 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
3010 rflow->last_qtail)) >= 0)) {
3012 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
3015 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
3023 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
3025 if (cpu_online(tcpu)) {
3035 #ifdef CONFIG_RFS_ACCEL
3038 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3039 * @dev: Device on which the filter was set
3040 * @rxq_index: RX queue index
3041 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3042 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3044 * Drivers that implement ndo_rx_flow_steer() should periodically call
3045 * this function for each installed filter and remove the filters for
3046 * which it returns %true.
3048 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3049 u32 flow_id, u16 filter_id)
3051 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3052 struct rps_dev_flow_table *flow_table;
3053 struct rps_dev_flow *rflow;
3058 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3059 if (flow_table && flow_id <= flow_table->mask) {
3060 rflow = &flow_table->flows[flow_id];
3061 cpu = ACCESS_ONCE(rflow->cpu);
3062 if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
3063 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3064 rflow->last_qtail) <
3065 (int)(10 * flow_table->mask)))
3071 EXPORT_SYMBOL(rps_may_expire_flow);
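/* Usage sketch (hypothetical driver code): a periodic scan over the
 * driver's installed RFS filters, assuming a driver-private table
 * my_filters[] that remembers the rx queue and flow_id for each
 * filter id:
 *
 *	for (i = 0; i < n_filters; i++)
 *		if (rps_may_expire_flow(dev, my_filters[i].rxq,
 *					my_filters[i].flow_id, i))
 *			my_remove_hw_filter(dev, i);	// hypothetical
 */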
3073 #endif /* CONFIG_RFS_ACCEL */
3075 /* Called from hardirq (IPI) context */
3076 static void rps_trigger_softirq(void *data)
3078 struct softnet_data *sd = data;
3080 ____napi_schedule(sd, &sd->backlog);
3084 #endif /* CONFIG_RPS */
3087 * Check if this softnet_data structure belongs to another CPU.
3088 * If yes, queue it to our IPI list and return 1.
3091 static int rps_ipi_queued(struct softnet_data *sd)
3094 struct softnet_data *mysd = &__get_cpu_var(softnet_data);
3097 sd->rps_ipi_next = mysd->rps_ipi_list;
3098 mysd->rps_ipi_list = sd;
3100 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3103 #endif /* CONFIG_RPS */
3108 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
3109 * queue (may be a remote CPU queue).
3111 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3112 unsigned int *qtail)
3114 struct softnet_data *sd;
3115 unsigned long flags;
3117 sd = &per_cpu(softnet_data, cpu);
3119 local_irq_save(flags);
3122 if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
3123 if (skb_queue_len(&sd->input_pkt_queue)) {
3125 __skb_queue_tail(&sd->input_pkt_queue, skb);
3126 input_queue_tail_incr_save(sd, qtail);
3128 local_irq_restore(flags);
3129 return NET_RX_SUCCESS;
3132 /* Schedule NAPI for the backlog device.
3133 * We can use a non-atomic operation since we own the queue lock.
3135 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
3136 if (!rps_ipi_queued(sd))
3137 ____napi_schedule(sd, &sd->backlog);
3145 local_irq_restore(flags);
3147 atomic_long_inc(&skb->dev->rx_dropped);
3153 * netif_rx - post buffer to the network code
3154 * @skb: buffer to post
3156 * This function receives a packet from a device driver and queues it for
3157 * the upper (protocol) levels to process. It always succeeds. The buffer
3158 * may be dropped during processing for congestion control or by the
3159 * protocol layers.
3161 * Return values:
3162 * NET_RX_SUCCESS (no congestion)
3163 * NET_RX_DROP (packet was dropped)
3167 int netif_rx(struct sk_buff *skb)
3171 /* if netpoll wants it, pretend we never saw it */
3172 if (netpoll_rx(skb))
3175 net_timestamp_check(netdev_tstamp_prequeue, skb);
3177 trace_netif_rx(skb);
3179 if (static_key_false(&rps_needed)) {
3180 struct rps_dev_flow voidflow, *rflow = &voidflow;
3186 cpu = get_rps_cpu(skb->dev, skb, &rflow);
3188 cpu = smp_processor_id();
3190 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3198 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3203 EXPORT_SYMBOL(netif_rx);
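/* Usage sketch (hypothetical non-NAPI driver): an interrupt handler
 * hands each received frame to the stack with netif_rx() and may
 * ignore the NET_RX_* return value:
 *
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */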
3205 int netif_rx_ni(struct sk_buff *skb)
3210 err = netif_rx(skb);
3211 if (local_softirq_pending())
3217 EXPORT_SYMBOL(netif_rx_ni);
3219 static void net_tx_action(struct softirq_action *h)
3221 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3223 if (sd->completion_queue) {
3224 struct sk_buff *clist;
3226 local_irq_disable();
3227 clist = sd->completion_queue;
3228 sd->completion_queue = NULL;
3232 struct sk_buff *skb = clist;
3233 clist = clist->next;
3235 WARN_ON(atomic_read(&skb->users));
3236 trace_kfree_skb(skb, net_tx_action);
3241 if (sd->output_queue) {
3244 local_irq_disable();
3245 head = sd->output_queue;
3246 sd->output_queue = NULL;
3247 sd->output_queue_tailp = &sd->output_queue;
3251 struct Qdisc *q = head;
3252 spinlock_t *root_lock;
3254 head = head->next_sched;
3256 root_lock = qdisc_lock(q);
3257 if (spin_trylock(root_lock)) {
3258 smp_mb__before_clear_bit();
3259 clear_bit(__QDISC_STATE_SCHED,
3262 spin_unlock(root_lock);
3264 if (!test_bit(__QDISC_STATE_DEACTIVATED,
3266 __netif_reschedule(q);
3268 smp_mb__before_clear_bit();
3269 clear_bit(__QDISC_STATE_SCHED,
3277 #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3278 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
3279 /* This hook is defined here for ATM LANE */
3280 int (*br_fdb_test_addr_hook)(struct net_device *dev,
3281 unsigned char *addr) __read_mostly;
3282 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
3285 #ifdef CONFIG_NET_CLS_ACT
3286 /* TODO: Maybe we should just force sch_ingress to be compiled in
3287 * when CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless
3288 * instructions (a compare and 2 extra stores) right now if we don't
3289 * have it on but do have CONFIG_NET_CLS_ACT.
3290 * NOTE: This doesn't stop any functionality; if you don't have
3291 * the ingress scheduler, you just can't add policies on ingress.
3294 static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
3296 struct net_device *dev = skb->dev;
3297 u32 ttl = G_TC_RTTL(skb->tc_verd);
3298 int result = TC_ACT_OK;
3301 if (unlikely(MAX_RED_LOOP < ttl++)) {
3302 net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
3303 skb->skb_iif, dev->ifindex);
3307 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
3308 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3311 if (q != &noop_qdisc) {
3312 spin_lock(qdisc_lock(q));
3313 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
3314 result = qdisc_enqueue_root(skb, q);
3315 spin_unlock(qdisc_lock(q));
3321 static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3322 struct packet_type **pt_prev,
3323 int *ret, struct net_device *orig_dev)
3325 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
3327 if (!rxq || rxq->qdisc == &noop_qdisc)
3331 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3335 switch (ing_filter(skb, rxq)) {
3349 * netdev_rx_handler_register - register receive handler
3350 * @dev: device to register a handler for
3351 * @rx_handler: receive handler to register
3352 * @rx_handler_data: data pointer that is used by rx handler
3354 * Register a receive handler for a device. This handler will then be
3355 * called from __netif_receive_skb. A negative errno code is returned
3356 * on a failure.
3358 * The caller must hold the rtnl_mutex.
3360 * For a general description of rx_handler, see enum rx_handler_result.
3362 int netdev_rx_handler_register(struct net_device *dev,
3363 rx_handler_func_t *rx_handler,
3364 void *rx_handler_data)
3368 if (dev->rx_handler)
3371 /* Note: rx_handler_data must be set before rx_handler */
3372 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
3373 rcu_assign_pointer(dev->rx_handler, rx_handler);
3377 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
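/* Usage sketch (hypothetical, modeled on how bridge/bonding-style code
 * consumes this API): register under RTNL, passing per-port private
 * data that the handler later retrieves from dev->rx_handler_data:
 *
 *	ASSERT_RTNL();
 *	err = netdev_rx_handler_register(port_dev, my_handle_frame, port);
 *	if (err)
 *		goto err_handler;	// hypothetical unwind label
 */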
3380 * netdev_rx_handler_unregister - unregister receive handler
3381 * @dev: device to unregister a handler from
3383 * Unregister a receive handler from a device.
3385 * The caller must hold the rtnl_mutex.
3387 void netdev_rx_handler_unregister(struct net_device *dev)
3391 RCU_INIT_POINTER(dev->rx_handler, NULL);
3392 /* a reader seeing a non-NULL rx_handler in a rcu_read_lock()
3393 * section is guaranteed to see a non-NULL rx_handler_data as well. */
3397 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
3399 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3402 * Limit the use of PFMEMALLOC reserves to those protocols that implement
3403 * the special handling of PFMEMALLOC skbs.
3405 static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3407 switch (skb->protocol) {
3408 case __constant_htons(ETH_P_ARP):
3409 case __constant_htons(ETH_P_IP):
3410 case __constant_htons(ETH_P_IPV6):
3411 case __constant_htons(ETH_P_8021Q):
3412 case __constant_htons(ETH_P_8021AD):
3419 static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
3421 struct packet_type *ptype, *pt_prev;
3422 rx_handler_func_t *rx_handler;
3423 struct net_device *orig_dev;
3424 struct net_device *null_or_dev;
3425 bool deliver_exact = false;
3426 int ret = NET_RX_DROP;
3429 net_timestamp_check(!netdev_tstamp_prequeue, skb);
3431 trace_netif_receive_skb(skb);
3433 /* if we've gotten here through NAPI, check netpoll */
3434 if (netpoll_receive_skb(skb))
3437 orig_dev = skb->dev;
3439 skb_reset_network_header(skb);
3440 if (!skb_transport_header_was_set(skb))
3441 skb_reset_transport_header(skb);
3442 skb_reset_mac_len(skb);
3449 skb->skb_iif = skb->dev->ifindex;
3451 __this_cpu_inc(softnet_data.processed);
3453 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
3454 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
3455 skb = vlan_untag(skb);
3460 #ifdef CONFIG_NET_CLS_ACT
3461 if (skb->tc_verd & TC_NCLS) {
3462 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3470 list_for_each_entry_rcu(ptype, &ptype_all, list) {
3471 if (!ptype->dev || ptype->dev == skb->dev) {
3473 ret = deliver_skb(skb, pt_prev, orig_dev);
3479 #ifdef CONFIG_NET_CLS_ACT
3480 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3486 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
3489 if (vlan_tx_tag_present(skb)) {
3491 ret = deliver_skb(skb, pt_prev, orig_dev);
3494 if (vlan_do_receive(&skb))
3496 else if (unlikely(!skb))
3500 rx_handler = rcu_dereference(skb->dev->rx_handler);
3503 ret = deliver_skb(skb, pt_prev, orig_dev);
3506 switch (rx_handler(&skb)) {
3507 case RX_HANDLER_CONSUMED:
3508 ret = NET_RX_SUCCESS;
3510 case RX_HANDLER_ANOTHER:
3512 case RX_HANDLER_EXACT:
3513 deliver_exact = true;
3514 case RX_HANDLER_PASS:
3521 if (unlikely(vlan_tx_tag_present(skb))) {
3522 if (vlan_tx_tag_get_id(skb))
3523 skb->pkt_type = PACKET_OTHERHOST;
3524 /* Note: we might in the future use prio bits
3525 * and set skb->priority like in vlan_do_receive()
3526 * For the time being, just ignore Priority Code Point
3531 /* deliver only exact match when indicated */
3532 null_or_dev = deliver_exact ? skb->dev : NULL;
3534 type = skb->protocol;
3535 list_for_each_entry_rcu(ptype,
3536 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
3537 if (ptype->type == type &&
3538 (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
3539 ptype->dev == orig_dev)) {
3541 ret = deliver_skb(skb, pt_prev, orig_dev);
3547 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
3550 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
3553 atomic_long_inc(&skb->dev->rx_dropped);
3555 /* Jamal, now you will not be able to escape explaining
3556 * to me how you were going to use this. :-)
3567 static int __netif_receive_skb(struct sk_buff *skb)
3571 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
3572 unsigned long pflags = current->flags;
3575 * PFMEMALLOC skbs are special, they should
3576 * - be delivered to SOCK_MEMALLOC sockets only
3577 * - stay away from userspace
3578 * - have bounded memory usage
3580 * Use PF_MEMALLOC as this saves us from propagating the allocation
3581 * context down to all allocation sites.
3583 current->flags |= PF_MEMALLOC;
3584 ret = __netif_receive_skb_core(skb, true);
3585 tsk_restore_flags(current, pflags, PF_MEMALLOC);
3587 ret = __netif_receive_skb_core(skb, false);
3593 * netif_receive_skb - process receive buffer from network
3594 * @skb: buffer to process
3596 * netif_receive_skb() is the main receive data processing function.
3597 * It always succeeds. The buffer may be dropped during processing
3598 * for congestion control or by the protocol layers.
3600 * This function may only be called from softirq context and interrupts
3601 * should be enabled.
3603 * Return values (usually ignored):
3604 * NET_RX_SUCCESS: no congestion
3605 * NET_RX_DROP: packet was dropped
3607 int netif_receive_skb(struct sk_buff *skb)
3609 net_timestamp_check(netdev_tstamp_prequeue, skb);
3611 if (skb_defer_rx_timestamp(skb))
3612 return NET_RX_SUCCESS;
3615 if (static_key_false(&rps_needed)) {
3616 struct rps_dev_flow voidflow, *rflow = &voidflow;
3621 cpu = get_rps_cpu(skb->dev, skb, &rflow);
3624 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3631 return __netif_receive_skb(skb);
3633 EXPORT_SYMBOL(netif_receive_skb);
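/* Usage sketch (hypothetical NAPI driver): from the ->poll() callback,
 * with BHs already disabled, completed frames are passed up with
 * netif_receive_skb() (or napi_gro_receive() further below):
 *
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_receive_skb(skb);
 */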
3635 /* Network device is going away; flush any packets still pending.
3636 * Called with irqs disabled.
3638 static void flush_backlog(void *arg)
3640 struct net_device *dev = arg;
3641 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3642 struct sk_buff *skb, *tmp;
3645 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
3646 if (skb->dev == dev) {
3647 __skb_unlink(skb, &sd->input_pkt_queue);
3649 input_queue_head_incr(sd);
3654 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3655 if (skb->dev == dev) {
3656 __skb_unlink(skb, &sd->process_queue);
3658 input_queue_head_incr(sd);
3663 static int napi_gro_complete(struct sk_buff *skb)
3665 struct packet_offload *ptype;
3666 __be16 type = skb->protocol;
3667 struct list_head *head = &offload_base;
3670 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
3672 if (NAPI_GRO_CB(skb)->count == 1) {
3673 skb_shinfo(skb)->gso_size = 0;
3678 list_for_each_entry_rcu(ptype, head, list) {
3679 if (ptype->type != type || !ptype->callbacks.gro_complete)
3682 err = ptype->callbacks.gro_complete(skb);
3688 WARN_ON(&ptype->list == head);
3690 return NET_RX_SUCCESS;
3694 return netif_receive_skb(skb);
3697 /* napi->gro_list contains packets ordered by age, with the
3698 * youngest packets at the head of it.
3699 * Complete skbs in reverse order to reduce latencies.
3701 void napi_gro_flush(struct napi_struct *napi, bool flush_old)
3703 struct sk_buff *skb, *prev = NULL;
3705 /* scan list and build reverse chain */
3706 for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
3711 for (skb = prev; skb; skb = prev) {
3714 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
3718 napi_gro_complete(skb);
3722 napi->gro_list = NULL;
3724 EXPORT_SYMBOL(napi_gro_flush);
3726 static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
3729 unsigned int maclen = skb->dev->hard_header_len;
3731 for (p = napi->gro_list; p; p = p->next) {
3732 unsigned long diffs;
3734 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3735 diffs |= p->vlan_tci ^ skb->vlan_tci;
3736 if (maclen == ETH_HLEN)
3737 diffs |= compare_ether_header(skb_mac_header(p),
3738 skb_gro_mac_header(skb));
3740 diffs = memcmp(skb_mac_header(p),
3741 skb_gro_mac_header(skb),
3743 NAPI_GRO_CB(p)->same_flow = !diffs;
3744 NAPI_GRO_CB(p)->flush = 0;
3748 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3750 struct sk_buff **pp = NULL;
3751 struct packet_offload *ptype;
3752 __be16 type = skb->protocol;
3753 struct list_head *head = &offload_base;
3755 enum gro_result ret;
3757 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
3760 if (skb_is_gso(skb) || skb_has_frag_list(skb))
3763 gro_list_prepare(napi, skb);
3766 list_for_each_entry_rcu(ptype, head, list) {
3767 if (ptype->type != type || !ptype->callbacks.gro_receive)
3770 skb_set_network_header(skb, skb_gro_offset(skb));
3771 skb_reset_mac_len(skb);
3772 NAPI_GRO_CB(skb)->same_flow = 0;
3773 NAPI_GRO_CB(skb)->flush = 0;
3774 NAPI_GRO_CB(skb)->free = 0;
3776 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
3781 if (&ptype->list == head)
3784 same_flow = NAPI_GRO_CB(skb)->same_flow;
3785 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
3788 struct sk_buff *nskb = *pp;
3792 napi_gro_complete(nskb);
3799 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
3803 NAPI_GRO_CB(skb)->count = 1;
3804 NAPI_GRO_CB(skb)->age = jiffies;
3805 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
3806 skb->next = napi->gro_list;
3807 napi->gro_list = skb;
3811 if (skb_headlen(skb) < skb_gro_offset(skb)) {
3812 int grow = skb_gro_offset(skb) - skb_headlen(skb);
3814 BUG_ON(skb->end - skb->tail < grow);
3816 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3819 skb->data_len -= grow;
3821 skb_shinfo(skb)->frags[0].page_offset += grow;
3822 skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);
3824 if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
3825 skb_frag_unref(skb, 0);
3826 memmove(skb_shinfo(skb)->frags,
3827 skb_shinfo(skb)->frags + 1,
3828 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
3841 static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
3845 if (netif_receive_skb(skb))
3853 case GRO_MERGED_FREE:
3854 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
3855 kmem_cache_free(skbuff_head_cache, skb);
3868 static void skb_gro_reset_offset(struct sk_buff *skb)
3870 const struct skb_shared_info *pinfo = skb_shinfo(skb);
3871 const skb_frag_t *frag0 = &pinfo->frags[0];
3873 NAPI_GRO_CB(skb)->data_offset = 0;
3874 NAPI_GRO_CB(skb)->frag0 = NULL;
3875 NAPI_GRO_CB(skb)->frag0_len = 0;
3877 if (skb->mac_header == skb->tail &&
3879 !PageHighMem(skb_frag_page(frag0))) {
3880 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
3881 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
3885 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3887 skb_gro_reset_offset(skb);
3889 return napi_skb_finish(dev_gro_receive(napi, skb), skb);
3891 EXPORT_SYMBOL(napi_gro_receive);
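/* Usage sketch (hypothetical NAPI driver): napi_gro_receive() is a
 * drop-in replacement for netif_receive_skb() in the ->poll() loop;
 * priv is an assumed driver-private structure holding the napi context:
 *
 *	skb->protocol = eth_type_trans(skb, netdev);
 *	napi_gro_receive(&priv->napi, skb);
 */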
3893 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
3895 __skb_pull(skb, skb_headlen(skb));
3896 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
3897 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
3899 skb->dev = napi->dev;
3901 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
3906 struct sk_buff *napi_get_frags(struct napi_struct *napi)
3908 struct sk_buff *skb = napi->skb;
3911 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
3917 EXPORT_SYMBOL(napi_get_frags);
3919 static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
3925 skb->protocol = eth_type_trans(skb, skb->dev);
3927 if (ret == GRO_HELD)
3928 skb_gro_pull(skb, -ETH_HLEN);
3929 else if (netif_receive_skb(skb))
3934 case GRO_MERGED_FREE:
3935 napi_reuse_skb(napi, skb);
3945 static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
3947 struct sk_buff *skb = napi->skb;
3954 skb_reset_mac_header(skb);
3955 skb_gro_reset_offset(skb);
3957 off = skb_gro_offset(skb);
3958 hlen = off + sizeof(*eth);
3959 eth = skb_gro_header_fast(skb, off);
3960 if (skb_gro_header_hard(skb, hlen)) {
3961 eth = skb_gro_header_slow(skb, hlen, off);
3962 if (unlikely(!eth)) {
3963 napi_reuse_skb(napi, skb);
3969 skb_gro_pull(skb, sizeof(*eth));
3972 * This works because the only protocols we care about don't require
3973 * special handling. We'll fix it up properly at the end.
3975 skb->protocol = eth->h_proto;
3981 gro_result_t napi_gro_frags(struct napi_struct *napi)
3983 struct sk_buff *skb = napi_frags_skb(napi);
3988 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
3990 EXPORT_SYMBOL(napi_gro_frags);
3993 * net_rps_action sends any pending IPIs for RPS.
3994 * Note: called with local irq disabled, but exits with local irq enabled.
3996 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
3999 struct softnet_data *remsd = sd->rps_ipi_list;
4002 sd->rps_ipi_list = NULL;
4006 /* Send pending IPI's to kick RPS processing on remote cpus. */
4008 struct softnet_data *next = remsd->rps_ipi_next;
4010 if (cpu_online(remsd->cpu))
4011 __smp_call_function_single(remsd->cpu,
4020 static int process_backlog(struct napi_struct *napi, int quota)
4023 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
4026 /* Check if we have pending IPIs; it's better to send them now
4027 * than to wait for net_rx_action() to end.
4029 if (sd->rps_ipi_list) {
4030 local_irq_disable();
4031 net_rps_action_and_irq_enable(sd);
4034 napi->weight = weight_p;
4035 local_irq_disable();
4036 while (work < quota) {
4037 struct sk_buff *skb;
4040 while ((skb = __skb_dequeue(&sd->process_queue))) {
4042 __netif_receive_skb(skb);
4043 local_irq_disable();
4044 input_queue_head_incr(sd);
4045 if (++work >= quota) {
4052 qlen = skb_queue_len(&sd->input_pkt_queue);
4054 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4055 &sd->process_queue);
4057 if (qlen < quota - work) {
4059 * Inline a custom version of __napi_complete().
4060 * Only the current cpu owns and manipulates this napi,
4061 * and NAPI_STATE_SCHED is the only possible flag set on backlog.
4062 * We can use a plain write instead of clear_bit(),
4063 * and we don't need an smp_mb() memory barrier.
4065 list_del(&napi->poll_list);
4068 quota = work + qlen;
4078 * __napi_schedule - schedule for receive
4079 * @n: entry to schedule
4081 * The entry's receive function will be scheduled to run
4083 void __napi_schedule(struct napi_struct *n)
4085 unsigned long flags;
4087 local_irq_save(flags);
4088 ____napi_schedule(&__get_cpu_var(softnet_data), n);
4089 local_irq_restore(flags);
4091 EXPORT_SYMBOL(__napi_schedule);
4093 void __napi_complete(struct napi_struct *n)
4095 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
4096 BUG_ON(n->gro_list);
4098 list_del(&n->poll_list);
4099 smp_mb__before_clear_bit();
4100 clear_bit(NAPI_STATE_SCHED, &n->state);
4102 EXPORT_SYMBOL(__napi_complete);
4104 void napi_complete(struct napi_struct *n)
4106 unsigned long flags;
4109 * don't let napi dequeue from the cpu poll list
4110 * just in case it's running on a different cpu
4112 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4115 napi_gro_flush(n, false);
4116 local_irq_save(flags);
4118 local_irq_restore(flags);
4120 EXPORT_SYMBOL(napi_complete);
4122 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
4123 int (*poll)(struct napi_struct *, int), int weight)
4125 INIT_LIST_HEAD(&napi->poll_list);
4126 napi->gro_count = 0;
4127 napi->gro_list = NULL;
4130 if (weight > NAPI_POLL_WEIGHT)
4131 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
4133 napi->weight = weight;
4134 list_add(&napi->dev_list, &dev->napi_list);
4136 #ifdef CONFIG_NETPOLL
4137 spin_lock_init(&napi->poll_lock);
4138 napi->poll_owner = -1;
4140 set_bit(NAPI_STATE_SCHED, &napi->state);
4142 EXPORT_SYMBOL(netif_napi_add);
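/* Usage sketch (hypothetical driver probe/remove paths): my_poll is an
 * assumed callback with the int (*)(struct napi_struct *, int)
 * signature required above:
 *
 *	netif_napi_add(netdev, &priv->napi, my_poll, NAPI_POLL_WEIGHT);
 *	...
 *	netif_napi_del(&priv->napi);	// on teardown
 */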
4144 void netif_napi_del(struct napi_struct *napi)
4146 struct sk_buff *skb, *next;
4148 list_del_init(&napi->dev_list);
4149 napi_free_frags(napi);
4151 for (skb = napi->gro_list; skb; skb = next) {
4157 napi->gro_list = NULL;
4158 napi->gro_count = 0;
4160 EXPORT_SYMBOL(netif_napi_del);
4162 static void net_rx_action(struct softirq_action *h)
4164 struct softnet_data *sd = &__get_cpu_var(softnet_data);
4165 unsigned long time_limit = jiffies + 2;
4166 int budget = netdev_budget;
4169 local_irq_disable();
4171 while (!list_empty(&sd->poll_list)) {
4172 struct napi_struct *n;
4175 /* If the softirq window is exhausted then punt.
4176 * Allow this to run for 2 jiffies, which will allow
4177 * an average latency of 1.5/HZ.
4179 if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
4184 /* Even though interrupts have been re-enabled, this
4185 * access is safe because interrupts can only add new
4186 * entries to the tail of this list, and only ->poll()
4187 * calls can remove this head entry from the list.
4189 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
4191 have = netpoll_poll_lock(n);
4195 /* This NAPI_STATE_SCHED test is for avoiding a race
4196 * with netpoll's poll_napi(). Only the entity which
4197 * obtains the lock and sees NAPI_STATE_SCHED set will
4198 * actually make the ->poll() call. Therefore we avoid
4199 * accidentally calling ->poll() when NAPI is not scheduled.
4202 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
4203 work = n->poll(n, weight);
4207 WARN_ON_ONCE(work > weight);
4211 local_irq_disable();
4213 /* Drivers must not modify the NAPI state if they
4214 * consume the entire weight. In such cases this code
4215 * still "owns" the NAPI instance and therefore can
4216 * move the instance around on the list at-will.
4218 if (unlikely(work == weight)) {
4219 if (unlikely(napi_disable_pending(n))) {
4222 local_irq_disable();
4225 /* Flush packets that are too old.
4226 * If HZ < 1000, flush all packets.
4229 napi_gro_flush(n, HZ >= 1000);
4230 local_irq_disable();
4232 list_move_tail(&n->poll_list, &sd->poll_list);
4236 netpoll_poll_unlock(have);
4239 net_rps_action_and_irq_enable(sd);
4241 #ifdef CONFIG_NET_DMA
4243 * There may not be any more sk_buffs coming right now, so push
4244 * any pending DMA copies to hardware
4246 dma_issue_pending_all();
4253 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4257 struct netdev_upper {
4258 struct net_device *dev;
4260 struct list_head list;
4261 struct rcu_head rcu;
4262 struct list_head search_list;
4265 static void __append_search_uppers(struct list_head *search_list,
4266 struct net_device *dev)
4268 struct netdev_upper *upper;
4270 list_for_each_entry(upper, &dev->upper_dev_list, list) {
4271 /* check if this upper is not already in search list */
4272 if (list_empty(&upper->search_list))
4273 list_add_tail(&upper->search_list, search_list);
4277 static bool __netdev_search_upper_dev(struct net_device *dev,
4278 struct net_device *upper_dev)
4280 LIST_HEAD(search_list);
4281 struct netdev_upper *upper;
4282 struct netdev_upper *tmp;
4285 __append_search_uppers(&search_list, dev);
4286 list_for_each_entry(upper, &search_list, search_list) {
4287 if (upper->dev == upper_dev) {
4291 __append_search_uppers(&search_list, upper->dev);
4293 list_for_each_entry_safe(upper, tmp, &search_list, search_list)
4294 INIT_LIST_HEAD(&upper->search_list);
4298 static struct netdev_upper *__netdev_find_upper(struct net_device *dev,
4299 struct net_device *upper_dev)
4301 struct netdev_upper *upper;
4303 list_for_each_entry(upper, &dev->upper_dev_list, list) {
4304 if (upper->dev == upper_dev)
4311 * netdev_has_upper_dev - Check if device is linked to an upper device
4313 * @upper_dev: upper device to check
4315 * Find out if a device is linked to the specified upper device and return true
4316 * in case it is. Note that this checks only the immediate upper device,
4317 * not the complete stack of devices. The caller must hold the RTNL lock.
4319 bool netdev_has_upper_dev(struct net_device *dev,
4320 struct net_device *upper_dev)
4324 return __netdev_find_upper(dev, upper_dev);
4326 EXPORT_SYMBOL(netdev_has_upper_dev);
4329 * netdev_has_any_upper_dev - Check if device is linked to some device
4332 * Find out if a device is linked to an upper device and return true in case
4333 * it is. The caller must hold the RTNL lock.
4335 bool netdev_has_any_upper_dev(struct net_device *dev)
4339 return !list_empty(&dev->upper_dev_list);
4341 EXPORT_SYMBOL(netdev_has_any_upper_dev);
4344 * netdev_master_upper_dev_get - Get master upper device
4347 * Find a master upper device and return pointer to it or NULL in case
4348 * it's not there. The caller must hold the RTNL lock.
4350 struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
4352 struct netdev_upper *upper;
4356 if (list_empty(&dev->upper_dev_list))
4359 upper = list_first_entry(&dev->upper_dev_list,
4360 struct netdev_upper, list);
4361 if (likely(upper->master))
4365 EXPORT_SYMBOL(netdev_master_upper_dev_get);
4368 * netdev_master_upper_dev_get_rcu - Get master upper device
4371 * Find a master upper device and return pointer to it or NULL in case
4372 * it's not there. The caller must hold the RCU read lock.
4374 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
4376 struct netdev_upper *upper;
4378 upper = list_first_or_null_rcu(&dev->upper_dev_list,
4379 struct netdev_upper, list);
4380 if (upper && likely(upper->master))
4384 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
4386 static int __netdev_upper_dev_link(struct net_device *dev,
4387 struct net_device *upper_dev, bool master)
4389 struct netdev_upper *upper;
4393 if (dev == upper_dev)
4396 /* To prevent loops, check if dev is not upper device to upper_dev. */
4397 if (__netdev_search_upper_dev(upper_dev, dev))
4400 if (__netdev_find_upper(dev, upper_dev))
4403 if (master && netdev_master_upper_dev_get(dev))
4406 upper = kmalloc(sizeof(*upper), GFP_KERNEL);
4410 upper->dev = upper_dev;
4411 upper->master = master;
4412 INIT_LIST_HEAD(&upper->search_list);
4414 /* Ensure that master upper link is always the first item in list. */
4416 list_add_rcu(&upper->list, &dev->upper_dev_list);
4418 list_add_tail_rcu(&upper->list, &dev->upper_dev_list);
4419 dev_hold(upper_dev);
4425 * netdev_upper_dev_link - Add a link to the upper device
4427 * @upper_dev: new upper device
4429 * Adds a link to device which is upper to this one. The caller must hold
4430 * the RTNL lock. On a failure a negative errno code is returned.
4431 * On success the reference counts are adjusted and the function
4434 int netdev_upper_dev_link(struct net_device *dev,
4435 struct net_device *upper_dev)
4437 return __netdev_upper_dev_link(dev, upper_dev, false);
4439 EXPORT_SYMBOL(netdev_upper_dev_link);
4442 * netdev_master_upper_dev_link - Add a master link to the upper device
4444 * @upper_dev: new upper device
4446 * Adds a link to device which is upper to this one. In this case, only
4447 * one master upper device can be linked, although other non-master devices
4448 * might be linked as well. The caller must hold the RTNL lock.
4449 * On a failure a negative errno code is returned. On success the reference
4450 * counts are adjusted and the function returns zero.
4452 int netdev_master_upper_dev_link(struct net_device *dev,
4453 struct net_device *upper_dev)
4455 return __netdev_upper_dev_link(dev, upper_dev, true);
4457 EXPORT_SYMBOL(netdev_master_upper_dev_link);
4460 * netdev_upper_dev_unlink - Removes a link to upper device
4462 * @upper_dev: new upper device
4464 * Removes a link to device which is upper to this one. The caller must hold
4467 void netdev_upper_dev_unlink(struct net_device *dev,
4468 struct net_device *upper_dev)
4470 struct netdev_upper *upper;
4474 upper = __netdev_find_upper(dev, upper_dev);
4477 list_del_rcu(&upper->list);
4479 kfree_rcu(upper, rcu);
4481 EXPORT_SYMBOL(netdev_upper_dev_unlink);
4483 static void dev_change_rx_flags(struct net_device *dev, int flags)
4485 const struct net_device_ops *ops = dev->netdev_ops;
4487 if (ops->ndo_change_rx_flags)
4488 ops->ndo_change_rx_flags(dev, flags);
4491 static int __dev_set_promiscuity(struct net_device *dev, int inc)
4493 unsigned int old_flags = dev->flags;
4499 dev->flags |= IFF_PROMISC;
4500 dev->promiscuity += inc;
4501 if (dev->promiscuity == 0) {
4504 * If inc causes an overflow, leave promisc untouched and return an error.
4507 dev->flags &= ~IFF_PROMISC;
4509 dev->promiscuity -= inc;
4510 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
4515 if (dev->flags != old_flags) {
4516 pr_info("device %s %s promiscuous mode\n",
4518 dev->flags & IFF_PROMISC ? "entered" : "left");
4519 if (audit_enabled) {
4520 current_uid_gid(&uid, &gid);
4521 audit_log(current->audit_context, GFP_ATOMIC,
4522 AUDIT_ANOM_PROMISCUOUS,
4523 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
4524 dev->name, (dev->flags & IFF_PROMISC),
4525 (old_flags & IFF_PROMISC),
4526 from_kuid(&init_user_ns, audit_get_loginuid(current)),
4527 from_kuid(&init_user_ns, uid),
4528 from_kgid(&init_user_ns, gid),
4529 audit_get_sessionid(current));
4532 dev_change_rx_flags(dev, IFF_PROMISC);
4538 * dev_set_promiscuity - update promiscuity count on a device
4542 * Add or remove promiscuity from a device. While the count in the device
4543 * remains above zero the interface remains promiscuous. Once it hits zero
4544 * the device reverts to normal filtering operation. A negative inc
4545 * value is used to drop promiscuity on the device.
4546 * Return 0 if successful or a negative errno code on error.
4548 int dev_set_promiscuity(struct net_device *dev, int inc)
4550 unsigned int old_flags = dev->flags;
4553 err = __dev_set_promiscuity(dev, inc);
4556 if (dev->flags != old_flags)
4557 dev_set_rx_mode(dev);
4560 EXPORT_SYMBOL(dev_set_promiscuity);
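/* Usage sketch (hypothetical caller holding RTNL): the count-based API
 * pairs every +1 with a later -1, e.g. around a capture session:
 *
 *	err = dev_set_promiscuity(dev, 1);	// enter
 *	if (err)
 *		return err;
 *	...
 *	dev_set_promiscuity(dev, -1);		// leave
 */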
4563 * dev_set_allmulti - update allmulti count on a device
4567 * Add or remove reception of all multicast frames to a device. While the
4568 * count in the device remains above zero the interface remains listening
4569 * to all multicast frames. Once it hits zero the device reverts to normal
4570 * filtering operation. A negative @inc value is used to drop the counter
4571 * when releasing a resource needing all multicasts.
4572 * Return 0 if successful or a negative errno code on error.
4575 int dev_set_allmulti(struct net_device *dev, int inc)
4577 unsigned int old_flags = dev->flags;
4581 dev->flags |= IFF_ALLMULTI;
4582 dev->allmulti += inc;
4583 if (dev->allmulti == 0) {
4586 * If inc causes an overflow, leave allmulti untouched and return an error.
4589 dev->flags &= ~IFF_ALLMULTI;
4591 dev->allmulti -= inc;
4592 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
4597 if (dev->flags ^ old_flags) {
4598 dev_change_rx_flags(dev, IFF_ALLMULTI);
4599 dev_set_rx_mode(dev);
4603 EXPORT_SYMBOL(dev_set_allmulti);
4606 * Upload unicast and multicast address lists to device and
4607 * configure RX filtering. When the device doesn't support unicast
4608 * filtering it is put in promiscuous mode while unicast addresses are present.
4611 void __dev_set_rx_mode(struct net_device *dev)
4613 const struct net_device_ops *ops = dev->netdev_ops;
4615 /* dev_open will call this function so the list will stay sane. */
4616 if (!(dev->flags&IFF_UP))
4619 if (!netif_device_present(dev))
4622 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
4623 /* Unicast address changes may only happen under the rtnl,
4624 * therefore calling __dev_set_promiscuity here is safe.
4626 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
4627 __dev_set_promiscuity(dev, 1);
4628 dev->uc_promisc = true;
4629 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
4630 __dev_set_promiscuity(dev, -1);
4631 dev->uc_promisc = false;
4635 if (ops->ndo_set_rx_mode)
4636 ops->ndo_set_rx_mode(dev);
4638 EXPORT_SYMBOL(__dev_set_rx_mode);
4640 void dev_set_rx_mode(struct net_device *dev)
4642 netif_addr_lock_bh(dev);
4643 __dev_set_rx_mode(dev);
4644 netif_addr_unlock_bh(dev);
4648 * dev_get_flags - get flags reported to userspace
4651 * Get the combination of flag bits exported through APIs to userspace.
4653 unsigned int dev_get_flags(const struct net_device *dev)
4657 flags = (dev->flags & ~(IFF_PROMISC |
4662 (dev->gflags & (IFF_PROMISC |
4665 if (netif_running(dev)) {
4666 if (netif_oper_up(dev))
4667 flags |= IFF_RUNNING;
4668 if (netif_carrier_ok(dev))
4669 flags |= IFF_LOWER_UP;
4670 if (netif_dormant(dev))
4671 flags |= IFF_DORMANT;
4676 EXPORT_SYMBOL(dev_get_flags);
4678 int __dev_change_flags(struct net_device *dev, unsigned int flags)
4680 unsigned int old_flags = dev->flags;
4686 * Set the flags on our device.
4689 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4690 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4692 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4696 * Load in the correct multicast list now the flags have changed.
4699 if ((old_flags ^ flags) & IFF_MULTICAST)
4700 dev_change_rx_flags(dev, IFF_MULTICAST);
4702 dev_set_rx_mode(dev);
4705 * Have we downed the interface? We handle IFF_UP ourselves
4706 * according to user attempts to set it, rather than blindly setting it.
4711 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
4712 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
4715 dev_set_rx_mode(dev);
4718 if ((flags ^ dev->gflags) & IFF_PROMISC) {
4719 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4721 dev->gflags ^= IFF_PROMISC;
4722 dev_set_promiscuity(dev, inc);
4725 /* NOTE: the order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4726 is important. Some (broken) drivers set IFF_PROMISC when
4727 IFF_ALLMULTI is requested, without asking us and without reporting it.
4729 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
4730 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4732 dev->gflags ^= IFF_ALLMULTI;
4733 dev_set_allmulti(dev, inc);
4739 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
4741 unsigned int changes = dev->flags ^ old_flags;
4743 if (changes & IFF_UP) {
4744 if (dev->flags & IFF_UP)
4745 call_netdevice_notifiers(NETDEV_UP, dev);
4747 call_netdevice_notifiers(NETDEV_DOWN, dev);
4750 if (dev->flags & IFF_UP &&
4751 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
4752 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4756 * dev_change_flags - change device settings
4758 * @flags: device state flags
4760 * Change settings on a device based on the supplied state flags. The flags are
4761 * in the userspace exported format.
4763 int dev_change_flags(struct net_device *dev, unsigned int flags)
4766 unsigned int changes, old_flags = dev->flags;
4768 ret = __dev_change_flags(dev, flags);
4772 changes = old_flags ^ dev->flags;
4774 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
4776 __dev_notify_flags(dev, old_flags);
4779 EXPORT_SYMBOL(dev_change_flags);
4782 * dev_set_mtu - Change maximum transfer unit
4784 * @new_mtu: new transfer unit
4786 * Change the maximum transfer size of the network device.
4788 int dev_set_mtu(struct net_device *dev, int new_mtu)
4790 const struct net_device_ops *ops = dev->netdev_ops;
4793 if (new_mtu == dev->mtu)
4796 /* MTU must be positive. */
4800 if (!netif_device_present(dev))
4804 if (ops->ndo_change_mtu)
4805 err = ops->ndo_change_mtu(dev, new_mtu);
4810 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
4813 EXPORT_SYMBOL(dev_set_mtu);
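/* Usage sketch (hypothetical caller under RTNL): a tunnel-style driver
 * reducing the MTU of a device by its own encapsulation overhead
 * (my_overhead is an assumed value):
 *
 *	err = dev_set_mtu(dev, ETH_DATA_LEN - my_overhead);
 *	if (err)
 *		netdev_warn(dev, "failed to set MTU: %d\n", err);
 */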
4816 * dev_set_group - Change group this device belongs to
4818 * @new_group: group this device should belong to
4820 void dev_set_group(struct net_device *dev, int new_group)
4822 dev->group = new_group;
4824 EXPORT_SYMBOL(dev_set_group);
4827 * dev_set_mac_address - Change Media Access Control Address
4831 * Change the hardware (MAC) address of the device
4833 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4835 const struct net_device_ops *ops = dev->netdev_ops;
4838 if (!ops->ndo_set_mac_address)
4840 if (sa->sa_family != dev->type)
4842 if (!netif_device_present(dev))
4844 err = ops->ndo_set_mac_address(dev, sa);
4847 dev->addr_assign_type = NET_ADDR_SET;
4848 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4849 add_device_randomness(dev->dev_addr, dev->addr_len);
4852 EXPORT_SYMBOL(dev_set_mac_address);
4855 * dev_change_carrier - Change device carrier
4857 * @new_carrier: new value
4859 * Change device carrier
4861 int dev_change_carrier(struct net_device *dev, bool new_carrier)
4863 const struct net_device_ops *ops = dev->netdev_ops;
4865 if (!ops->ndo_change_carrier)
4867 if (!netif_device_present(dev))
4869 return ops->ndo_change_carrier(dev, new_carrier);
4871 EXPORT_SYMBOL(dev_change_carrier);
4874 * dev_new_index - allocate an ifindex
4875 * @net: the applicable net namespace
4877 * Returns a suitable unique value for a new device interface
4878 * number. The caller must hold the rtnl semaphore or the
4879 * dev_base_lock to be sure it remains unique.
4881 static int dev_new_index(struct net *net)
4883 int ifindex = net->ifindex;
4887 if (!__dev_get_by_index(net, ifindex))
4888 return net->ifindex = ifindex;
4892 /* Delayed registration/unregisteration */
4893 static LIST_HEAD(net_todo_list);
4895 static void net_set_todo(struct net_device *dev)
4897 list_add_tail(&dev->todo_list, &net_todo_list);
4900 static void rollback_registered_many(struct list_head *head)
4902 struct net_device *dev, *tmp;
4904 BUG_ON(dev_boot_phase);
4907 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
4908 /* Some devices call this without ever having registered,
4909 * as part of initialization unwind. Remove those
4910 * devices and proceed with the remaining ones.
4912 if (dev->reg_state == NETREG_UNINITIALIZED) {
4913 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
4917 list_del(&dev->unreg_list);
4920 dev->dismantle = true;
4921 BUG_ON(dev->reg_state != NETREG_REGISTERED);
4924 /* If device is running, close it first. */
4925 dev_close_many(head);
4927 list_for_each_entry(dev, head, unreg_list) {
4928 /* And unlink it from device chain. */
4929 unlist_netdevice(dev);
4931 dev->reg_state = NETREG_UNREGISTERING;
4936 list_for_each_entry(dev, head, unreg_list) {
4937 /* Shutdown queueing discipline. */
4941 /* Notify protocols that we are about to destroy
4942 this device. They should clean all the things.
4944 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4946 if (!dev->rtnl_link_ops ||
4947 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
4948 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
4951 * Flush the unicast and multicast chains
4956 if (dev->netdev_ops->ndo_uninit)
4957 dev->netdev_ops->ndo_uninit(dev);
		/* Notifier chain MUST detach all upper devices from us. */
4960 WARN_ON(netdev_has_any_upper_dev(dev));
4962 /* Remove entries from kobject tree */
4963 netdev_unregister_kobject(dev);
4965 /* Remove XPS queueing entries */
4966 netif_reset_xps_queues_gt(dev, 0);
	list_for_each_entry(dev, head, unreg_list)
		dev_put(dev);
4976 static void rollback_registered(struct net_device *dev)
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
4981 rollback_registered_many(&single);
4985 static netdev_features_t netdev_fix_features(struct net_device *dev,
4986 netdev_features_t features)
4988 /* Fix illegal checksum combinations */
4989 if ((features & NETIF_F_HW_CSUM) &&
4990 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4991 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
4992 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4995 /* TSO requires that SG is present as well. */
4996 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
4997 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
4998 features &= ~NETIF_F_ALL_TSO;
5001 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
5002 !(features & NETIF_F_IP_CSUM)) {
5003 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
5004 features &= ~NETIF_F_TSO;
5005 features &= ~NETIF_F_TSO_ECN;
5008 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
5009 !(features & NETIF_F_IPV6_CSUM)) {
5010 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
5011 features &= ~NETIF_F_TSO6;
5014 /* TSO ECN requires that TSO is present as well. */
5015 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
5016 features &= ~NETIF_F_TSO_ECN;
5018 /* Software GSO depends on SG. */
5019 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
5020 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
5021 features &= ~NETIF_F_GSO;
5024 /* UFO needs SG and checksumming */
5025 if (features & NETIF_F_UFO) {
5026 /* maybe split UFO into V4 and V6? */
5027 if (!((features & NETIF_F_GEN_CSUM) ||
5028 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5029 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5031 "Dropping NETIF_F_UFO since no checksum offload features.\n");
5032 features &= ~NETIF_F_UFO;
5035 if (!(features & NETIF_F_SG)) {
5037 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
5038 features &= ~NETIF_F_UFO;
5045 int __netdev_update_features(struct net_device *dev)
5047 netdev_features_t features;
5052 features = netdev_get_wanted_features(dev);
5054 if (dev->netdev_ops->ndo_fix_features)
5055 features = dev->netdev_ops->ndo_fix_features(dev, features);
5057 /* driver might be less strict about feature dependencies */
5058 features = netdev_fix_features(dev, features);
5060 if (dev->features == features)
5063 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
5064 &dev->features, &features);
5066 if (dev->netdev_ops->ndo_set_features)
5067 err = dev->netdev_ops->ndo_set_features(dev, features);
5069 if (unlikely(err < 0)) {
5071 "set_features() failed (%d); wanted %pNF, left %pNF\n",
5072 err, &features, &dev->features);
5077 dev->features = features;
5083 * netdev_update_features - recalculate device features
5084 * @dev: the device to check
5086 * Recalculate dev->features set and send notifications if it
5087 * has changed. Should be called after driver or hardware dependent
5088 * conditions might have changed that influence the features.
5090 void netdev_update_features(struct net_device *dev)
5092 if (__netdev_update_features(dev))
5093 netdev_features_change(dev);
5095 EXPORT_SYMBOL(netdev_update_features);
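/*
 * Example (illustrative sketch, not part of this file): a driver that
 * detects at runtime that its checksum engine is unusable can clear
 * the capability and let the core recompute everything that depends
 * on it. The function name is hypothetical; the RTNL must be held.
 *
 *	static void example_disable_hw_csum(struct net_device *dev)
 *	{
 *		dev->hw_features &= ~NETIF_F_IP_CSUM;
 *		dev->wanted_features &= ~NETIF_F_IP_CSUM;
 *		netdev_update_features(dev);
 *	}
 */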
5098 * netdev_change_features - recalculate device features
5099 * @dev: the device to check
5101 * Recalculate dev->features set and send notifications even
5102 * if they have not changed. Should be called instead of
5103 * netdev_update_features() if also dev->vlan_features might
5104 * have changed to allow the changes to be propagated to stacked
5107 void netdev_change_features(struct net_device *dev)
5109 __netdev_update_features(dev);
5110 netdev_features_change(dev);
5112 EXPORT_SYMBOL(netdev_change_features);
5115 * netif_stacked_transfer_operstate - transfer operstate
5116 * @rootdev: the root or lower level device to transfer state from
5117 * @dev: the device to transfer operstate to
5119 * Transfer operational state from root to device. This is normally
5120 * called when a stacking relationship exists between the root
 * device and the device (a leaf device).
5123 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5124 struct net_device *dev)
5126 if (rootdev->operstate == IF_OPER_DORMANT)
		netif_dormant_on(dev);
	else
		netif_dormant_off(dev);
5131 if (netif_carrier_ok(rootdev)) {
5132 if (!netif_carrier_ok(dev))
5133 netif_carrier_on(dev);
	} else {
		if (netif_carrier_ok(dev))
			netif_carrier_off(dev);
	}
5139 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
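/*
 * Example (illustrative sketch, not part of this file): a stacked
 * driver in the style of VLAN or macvlan keeping its upper device in
 * step with the lower one from a netdevice notifier;
 * example_upper_from_lower() is a hypothetical lookup helper.
 *
 *	static int example_notify(struct notifier_block *nb,
 *				  unsigned long event, void *ptr)
 *	{
 *		struct net_device *lower = ptr;
 *		struct net_device *upper = example_upper_from_lower(lower);
 *
 *		if (upper && event == NETDEV_CHANGE)
 *			netif_stacked_transfer_operstate(lower, upper);
 *		return NOTIFY_DONE;
 *	}
 */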
5142 static int netif_alloc_rx_queues(struct net_device *dev)
5144 unsigned int i, count = dev->num_rx_queues;
5145 struct netdev_rx_queue *rx;
5149 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
	for (i = 0; i < count; i++)
		rx[i].dev = dev;
5161 static void netdev_init_one_queue(struct net_device *dev,
5162 struct netdev_queue *queue, void *_unused)
5164 /* Initialize queue lock */
5165 spin_lock_init(&queue->_xmit_lock);
5166 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
5167 queue->xmit_lock_owner = -1;
5168 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
5171 dql_init(&queue->dql, HZ);
5175 static int netif_alloc_netdev_queues(struct net_device *dev)
5177 unsigned int count = dev->num_tx_queues;
5178 struct netdev_queue *tx;
5182 tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
5188 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5189 spin_lock_init(&dev->tx_global_lock);
5195 * register_netdevice - register a network device
5196 * @dev: device to register
5198 * Take a completed network device structure and add it to the kernel
5199 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5200 * chain. 0 is returned on success. A negative errno code is returned
5201 * on a failure to set up the device, or if the name is a duplicate.
5203 * Callers must hold the rtnl semaphore. You may want
5204 * register_netdev() instead of this.
5207 * The locking appears insufficient to guarantee two parallel registers
5208 * will not get the same name.
5211 int register_netdevice(struct net_device *dev)
5214 struct net *net = dev_net(dev);
5216 BUG_ON(dev_boot_phase);
	/* When net_devices are persistent, this will be fatal. */
5222 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
5225 spin_lock_init(&dev->addr_list_lock);
5226 netdev_set_addr_lockdep_class(dev);
5230 ret = dev_get_valid_name(net, dev, dev->name);
5234 /* Init, if this function is available */
5235 if (dev->netdev_ops->ndo_init) {
5236 ret = dev->netdev_ops->ndo_init(dev);
5244 if (((dev->hw_features | dev->features) &
5245 NETIF_F_HW_VLAN_CTAG_FILTER) &&
5246 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
5247 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
5248 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
5255 dev->ifindex = dev_new_index(net);
5256 else if (__dev_get_by_index(net, dev->ifindex))
5259 if (dev->iflink == -1)
5260 dev->iflink = dev->ifindex;
5262 /* Transfer changeable features to wanted_features and enable
5263 * software offloads (GSO and GRO).
5265 dev->hw_features |= NETIF_F_SOFT_FEATURES;
5266 dev->features |= NETIF_F_SOFT_FEATURES;
5267 dev->wanted_features = dev->features & dev->hw_features;
5269 /* Turn on no cache copy if HW is doing checksum */
5270 if (!(dev->flags & IFF_LOOPBACK)) {
5271 dev->hw_features |= NETIF_F_NOCACHE_COPY;
5272 if (dev->features & NETIF_F_ALL_CSUM) {
5273 dev->wanted_features |= NETIF_F_NOCACHE_COPY;
5274 dev->features |= NETIF_F_NOCACHE_COPY;
5278 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
5280 dev->vlan_features |= NETIF_F_HIGHDMA;
5282 /* Make NETIF_F_SG inheritable to tunnel devices.
5284 dev->hw_enc_features |= NETIF_F_SG;
5286 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5287 ret = notifier_to_errno(ret);
5291 ret = netdev_register_kobject(dev);
5294 dev->reg_state = NETREG_REGISTERED;
5296 __netdev_update_features(dev);
	 *	Default initial state at registration is that the
	 *	device is present.
5303 set_bit(__LINK_STATE_PRESENT, &dev->state);
5305 linkwatch_init_dev(dev);
5307 dev_init_scheduler(dev);
5309 list_netdevice(dev);
5310 add_device_randomness(dev->dev_addr, dev->addr_len);
	/* If the device has a permanent device address, the driver should
	 * set dev_addr and leave addr_assign_type at NET_ADDR_PERM
	 * (the default value).
	 */
5316 if (dev->addr_assign_type == NET_ADDR_PERM)
5317 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
5319 /* Notify protocols, that a new device appeared. */
5320 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
	ret = notifier_to_errno(ret);
	if (ret) {
		rollback_registered(dev);
		dev->reg_state = NETREG_UNREGISTERED;
	}
5327 * Prevent userspace races by waiting until the network
	 * device is fully set up before sending notifications.
5330 if (!dev->rtnl_link_ops ||
5331 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5332 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5338 if (dev->netdev_ops->ndo_uninit)
5339 dev->netdev_ops->ndo_uninit(dev);
5342 EXPORT_SYMBOL(register_netdevice);
5345 * init_dummy_netdev - init a dummy network device for NAPI
5346 * @dev: device to init
 * This takes a network device structure and initializes the minimum
5349 * amount of fields so it can be used to schedule NAPI polls without
5350 * registering a full blown interface. This is to be used by drivers
5351 * that need to tie several hardware interfaces to a single NAPI
5352 * poll scheduler due to HW limitations.
5354 int init_dummy_netdev(struct net_device *dev)
5356 /* Clear everything. Note we don't initialize spinlocks
	 * as they aren't supposed to be taken by any of the
5358 * NAPI code and this dummy netdev is supposed to be
5359 * only ever used for NAPI polls
5361 memset(dev, 0, sizeof(struct net_device));
5363 /* make sure we BUG if trying to hit standard
5364 * register/unregister code path
5366 dev->reg_state = NETREG_DUMMY;
5368 /* NAPI wants this */
5369 INIT_LIST_HEAD(&dev->napi_list);
5371 /* a dummy interface is started by default */
5372 set_bit(__LINK_STATE_PRESENT, &dev->state);
5373 set_bit(__LINK_STATE_START, &dev->state);
	/* Note : We don't allocate pcpu_refcnt for dummy devices,
	 * because users of this 'device' don't need to change
	 * its refcount.
	 */
5382 EXPORT_SYMBOL_GPL(init_dummy_netdev);
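/*
 * Example (illustrative sketch, not part of this file): a driver that
 * funnels several hardware channels through one poll routine can hang
 * its napi_struct off a dummy netdev. struct example_hw and
 * example_poll() are hypothetical.
 *
 *	struct example_hw {
 *		struct net_device napi_dev;
 *		struct napi_struct napi;
 *	};
 *
 *	static void example_hw_init(struct example_hw *hw)
 *	{
 *		init_dummy_netdev(&hw->napi_dev);
 *		netif_napi_add(&hw->napi_dev, &hw->napi, example_poll, 64);
 *		napi_enable(&hw->napi);
 *	}
 */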
5386 * register_netdev - register a network device
5387 * @dev: device to register
5389 * Take a completed network device structure and add it to the kernel
5390 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5391 * chain. 0 is returned on success. A negative errno code is returned
5392 * on a failure to set up the device, or if the name is a duplicate.
5394 * This is a wrapper around register_netdevice that takes the rtnl semaphore
5395 * and expands the device name if you passed a format string to
5398 int register_netdev(struct net_device *dev)
5403 err = register_netdevice(dev);
5407 EXPORT_SYMBOL(register_netdev);
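/*
 * Example (illustrative sketch, not part of this file): the usual
 * allocate/register and unregister/free pairing for an Ethernet
 * driver; struct example_priv and example_netdev_ops are
 * hypothetical.
 *
 *	static struct net_device *example_probe(void)
 *	{
 *		struct net_device *dev;
 *
 *		dev = alloc_etherdev(sizeof(struct example_priv));
 *		if (!dev)
 *			return NULL;
 *		dev->netdev_ops = &example_netdev_ops;
 *		if (register_netdev(dev)) {
 *			free_netdev(dev);
 *			return NULL;
 *		}
 *		return dev;
 *	}
 *
 *	static void example_remove(struct net_device *dev)
 *	{
 *		unregister_netdev(dev);
 *		free_netdev(dev);
 *	}
 */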
5409 int netdev_refcnt_read(const struct net_device *dev)
5413 for_each_possible_cpu(i)
5414 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
5417 EXPORT_SYMBOL(netdev_refcnt_read);
5420 * netdev_wait_allrefs - wait until all references are gone.
5421 * @dev: target net_device
5423 * This is called when unregistering network devices.
5425 * Any protocol or device that holds a reference should register
 * for netdevice notification, and clean up and put back the
5427 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
5431 static void netdev_wait_allrefs(struct net_device *dev)
5433 unsigned long rebroadcast_time, warning_time;
5436 linkwatch_forget_dev(dev);
5438 rebroadcast_time = warning_time = jiffies;
5439 refcnt = netdev_refcnt_read(dev);
5441 while (refcnt != 0) {
5442 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
5445 /* Rebroadcast unregister notification */
5446 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5452 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
5453 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5455 /* We must not have linkwatch events
5456 * pending on unregister. If this
5457 * happens, we simply run the queue
5458 * unscheduled, resulting in a noop
5461 linkwatch_run_queue();
5466 rebroadcast_time = jiffies;
5471 refcnt = netdev_refcnt_read(dev);
5473 if (time_after(jiffies, warning_time + 10 * HZ)) {
			pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
				 dev->name, refcnt);
5476 warning_time = jiffies;
5485 * register_netdevice(x1);
5486 * register_netdevice(x2);
5488 * unregister_netdevice(y1);
5489 * unregister_netdevice(y2);
5495 * We are invoked by rtnl_unlock().
5496 * This allows us to deal with problems:
5497 * 1) We can delete sysfs objects which invoke hotplug
5498 * without deadlocking with linkwatch via keventd.
5499 * 2) Since we run with the RTNL semaphore not held, we can sleep
5500 * safely in order to wait for the netdev refcnt to drop to zero.
5502 * We must not return until all unregister events added during
5503 * the interval the lock was held have been completed.
5505 void netdev_run_todo(void)
5507 struct list_head list;
5509 /* Snapshot list, allow later requests */
5510 list_replace_init(&net_todo_list, &list);
5515 /* Wait for rcu callbacks to finish before next phase */
5516 if (!list_empty(&list))
5519 while (!list_empty(&list)) {
5520 struct net_device *dev
5521 = list_first_entry(&list, struct net_device, todo_list);
5522 list_del(&dev->todo_list);
5525 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
5528 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
5529 pr_err("network todo '%s' but state %d\n",
5530 dev->name, dev->reg_state);
5535 dev->reg_state = NETREG_UNREGISTERED;
5537 on_each_cpu(flush_backlog, dev, 1);
5539 netdev_wait_allrefs(dev);
5542 BUG_ON(netdev_refcnt_read(dev));
5543 WARN_ON(rcu_access_pointer(dev->ip_ptr));
5544 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
5545 WARN_ON(dev->dn_ptr);
5547 if (dev->destructor)
5548 dev->destructor(dev);
5550 /* Free network device */
5551 kobject_put(&dev->dev.kobj);
5555 /* Convert net_device_stats to rtnl_link_stats64. They have the same
5556 * fields in the same order, with only the type differing.
5558 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
5559 const struct net_device_stats *netdev_stats)
5561 #if BITS_PER_LONG == 64
5562 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
5563 memcpy(stats64, netdev_stats, sizeof(*stats64));
5565 size_t i, n = sizeof(*stats64) / sizeof(u64);
5566 const unsigned long *src = (const unsigned long *)netdev_stats;
5567 u64 *dst = (u64 *)stats64;
5569 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
5570 sizeof(*stats64) / sizeof(u64));
	for (i = 0; i < n; i++)
		dst[i] = src[i];
5575 EXPORT_SYMBOL(netdev_stats_to_stats64);
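/*
 * Example (illustrative sketch, not part of this file): a driver that
 * only maintains the classic dev->stats counters can still offer
 * 64-bit statistics by converting them; the function name is
 * hypothetical.
 *
 *	static struct rtnl_link_stats64 *
 *	example_get_stats64(struct net_device *dev,
 *			    struct rtnl_link_stats64 *storage)
 *	{
 *		netdev_stats_to_stats64(storage, &dev->stats);
 *		return storage;
 *	}
 */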
5578 * dev_get_stats - get network device statistics
5579 * @dev: device to get statistics from
5580 * @storage: place to store stats
5582 * Get network statistics from device. Return @storage.
5583 * The device driver may provide its own method by setting
5584 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
5585 * otherwise the internal statistics structure is used.
5587 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
5588 struct rtnl_link_stats64 *storage)
5590 const struct net_device_ops *ops = dev->netdev_ops;
5592 if (ops->ndo_get_stats64) {
5593 memset(storage, 0, sizeof(*storage));
5594 ops->ndo_get_stats64(dev, storage);
5595 } else if (ops->ndo_get_stats) {
5596 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
5598 netdev_stats_to_stats64(storage, &dev->stats);
5600 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
5603 EXPORT_SYMBOL(dev_get_stats);
5605 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
5607 struct netdev_queue *queue = dev_ingress_queue(dev);
5609 #ifdef CONFIG_NET_CLS_ACT
5612 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
5615 netdev_init_one_queue(dev, queue, NULL);
5616 queue->qdisc = &noop_qdisc;
5617 queue->qdisc_sleeping = &noop_qdisc;
5618 rcu_assign_pointer(dev->ingress_queue, queue);
5623 static const struct ethtool_ops default_ethtool_ops;
5625 void netdev_set_default_ethtool_ops(struct net_device *dev,
5626 const struct ethtool_ops *ops)
5628 if (dev->ethtool_ops == &default_ethtool_ops)
5629 dev->ethtool_ops = ops;
5631 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
5634 * alloc_netdev_mqs - allocate network device
5635 * @sizeof_priv: size of private data to allocate space for
5636 * @name: device name format string
5637 * @setup: callback to initialize device
5638 * @txqs: the number of TX subqueues to allocate
5639 * @rxqs: the number of RX subqueues to allocate
5641 * Allocates a struct net_device with private data area for driver use
 * and performs basic initialization. Also allocates subqueue structs
5643 * for each queue on the device.
5645 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
5646 void (*setup)(struct net_device *),
5647 unsigned int txqs, unsigned int rxqs)
5649 struct net_device *dev;
5651 struct net_device *p;
5653 BUG_ON(strlen(name) >= sizeof(dev->name));
	if (txqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
		return NULL;
	}

	if (rxqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
		return NULL;
	}
5667 alloc_size = sizeof(struct net_device);
5669 /* ensure 32-byte alignment of private area */
5670 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
5671 alloc_size += sizeof_priv;
5673 /* ensure 32-byte alignment of whole construct */
5674 alloc_size += NETDEV_ALIGN - 1;
5676 p = kzalloc(alloc_size, GFP_KERNEL);
5680 dev = PTR_ALIGN(p, NETDEV_ALIGN);
5681 dev->padded = (char *)dev - (char *)p;
5683 dev->pcpu_refcnt = alloc_percpu(int);
5684 if (!dev->pcpu_refcnt)
5687 if (dev_addr_init(dev))
5693 dev_net_set(dev, &init_net);
5695 dev->gso_max_size = GSO_MAX_SIZE;
5696 dev->gso_max_segs = GSO_MAX_SEGS;
5698 INIT_LIST_HEAD(&dev->napi_list);
5699 INIT_LIST_HEAD(&dev->unreg_list);
5700 INIT_LIST_HEAD(&dev->link_watch_list);
5701 INIT_LIST_HEAD(&dev->upper_dev_list);
5702 dev->priv_flags = IFF_XMIT_DST_RELEASE;
5705 dev->num_tx_queues = txqs;
5706 dev->real_num_tx_queues = txqs;
5707 if (netif_alloc_netdev_queues(dev))
5711 dev->num_rx_queues = rxqs;
5712 dev->real_num_rx_queues = rxqs;
5713 if (netif_alloc_rx_queues(dev))
5717 strcpy(dev->name, name);
5718 dev->group = INIT_NETDEV_GROUP;
5719 if (!dev->ethtool_ops)
5720 dev->ethtool_ops = &default_ethtool_ops;
5728 free_percpu(dev->pcpu_refcnt);
5738 EXPORT_SYMBOL(alloc_netdev_mqs);
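/*
 * Example (illustrative sketch, not part of this file): allocating a
 * device with eight TX and eight RX queues. example_setup() and
 * struct example_priv are hypothetical; a "%d" in the name is
 * expanded when the device is registered.
 *
 *	dev = alloc_netdev_mqs(sizeof(struct example_priv), "ex%d",
 *			       example_setup, 8, 8);
 *	if (!dev)
 *		return -ENOMEM;
 */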
5741 * free_netdev - free network device
5744 * This function does the last stage of destroying an allocated device
5745 * interface. The reference to the device object is released.
5746 * If this is the last reference then it will be freed.
5748 void free_netdev(struct net_device *dev)
5750 struct napi_struct *p, *n;
5752 release_net(dev_net(dev));
5759 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
5761 /* Flush device addresses */
5762 dev_addr_flush(dev);
5764 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5767 free_percpu(dev->pcpu_refcnt);
5768 dev->pcpu_refcnt = NULL;
5770 /* Compatibility with error handling in drivers */
5771 if (dev->reg_state == NETREG_UNINITIALIZED) {
		kfree((char *)dev - dev->padded);
		return;
	}
5776 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
5777 dev->reg_state = NETREG_RELEASED;
5779 /* will free via device release */
5780 put_device(&dev->dev);
5782 EXPORT_SYMBOL(free_netdev);
5785 * synchronize_net - Synchronize with packet receive processing
5787 * Wait for packets currently being received to be done.
5788 * Does not block later packets from starting.
5790 void synchronize_net(void)
5793 if (rtnl_is_locked())
		synchronize_rcu_expedited();
	else
		synchronize_rcu();
5798 EXPORT_SYMBOL(synchronize_net);
5801 * unregister_netdevice_queue - remove device from the kernel
5805 * This function shuts down a device interface and removes it
5806 * from the kernel tables.
 * If head is not NULL, the device is queued to be unregistered later.
5809 * Callers must hold the rtnl semaphore. You may want
5810 * unregister_netdev() instead of this.
5813 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
	if (head) {
		list_move_tail(&dev->unreg_list, head);
	} else {
		rollback_registered(dev);
		/* Finish processing unregister after unlock */
		net_set_todo(dev);
	}
5825 EXPORT_SYMBOL(unregister_netdevice_queue);
5828 * unregister_netdevice_many - unregister many devices
5829 * @head: list of devices
5831 void unregister_netdevice_many(struct list_head *head)
5833 struct net_device *dev;
5835 if (!list_empty(head)) {
5836 rollback_registered_many(head);
		list_for_each_entry(dev, head, unreg_list)
			net_set_todo(dev);
5841 EXPORT_SYMBOL(unregister_netdevice_many);
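/*
 * Example (illustrative sketch, not part of this file): queueing
 * several devices and tearing them down with a single shared grace
 * period instead of one per device. The example_ list names are
 * hypothetical; the RTNL must be held.
 *
 *	LIST_HEAD(kill_list);
 *	struct net_device *dev;
 *
 *	list_for_each_entry(dev, &example_devs, example_node)
 *		unregister_netdevice_queue(dev, &kill_list);
 *	unregister_netdevice_many(&kill_list);
 */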
5844 * unregister_netdev - remove device from the kernel
5847 * This function shuts down a device interface and removes it
5848 * from the kernel tables.
5850 * This is just a wrapper for unregister_netdevice that takes
5851 * the rtnl semaphore. In general you want to use this and not
5852 * unregister_netdevice.
5854 void unregister_netdev(struct net_device *dev)
5857 unregister_netdevice(dev);
5860 EXPORT_SYMBOL(unregister_netdev);
 * dev_change_net_namespace - move device to a different network namespace
5865 * @net: network namespace
5866 * @pat: If not NULL name pattern to try if the current device name
5867 * is already taken in the destination network namespace.
5869 * This function shuts down a device interface and moves it
5870 * to a new network namespace. On success 0 is returned, on
 * a failure a negative errno code is returned.
5873 * Callers must hold the rtnl semaphore.
5876 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
5882 /* Don't allow namespace local devices to be moved. */
5884 if (dev->features & NETIF_F_NETNS_LOCAL)
	/* Ensure the device has been registered */
5888 if (dev->reg_state != NETREG_REGISTERED)
	/* Get out if there is nothing to do */
5893 if (net_eq(dev_net(dev), net))
5896 /* Pick the destination device name, and ensure
5897 * we can use it in the destination network namespace.
5900 if (__dev_get_by_name(net, dev->name)) {
5901 /* We get here if we can't use the current device name */
5904 if (dev_get_valid_name(net, dev, pat) < 0)
	 * And now a mini version of register_netdevice and unregister_netdevice.
5912 /* If device is running close it first. */
5915 /* And unlink it from device chain */
5917 unlist_netdevice(dev);
5921 /* Shutdown queueing discipline. */
	/* Notify protocols that we are about to destroy
5925 this device. They should clean all the things.
5927 Note that dev->reg_state stays at NETREG_REGISTERED.
5928 This is wanted because this way 8021q and macvlan know
5929 the device is just moving and can keep their slaves up.
5931 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5933 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
5934 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
5937 * Flush the unicast and multicast chains
5942 /* Send a netdev-removed uevent to the old namespace */
5943 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
5945 /* Actually switch the network namespace */
5946 dev_net_set(dev, net);
5948 /* If there is an ifindex conflict assign a new one */
5949 if (__dev_get_by_index(net, dev->ifindex)) {
5950 int iflink = (dev->iflink == dev->ifindex);
5951 dev->ifindex = dev_new_index(net);
5953 dev->iflink = dev->ifindex;
5956 /* Send a netdev-add uevent to the new namespace */
5957 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
5959 /* Fixup kobjects */
5960 err = device_rename(&dev->dev, dev->name);
5963 /* Add the device back in the hashes */
5964 list_netdevice(dev);
5966 /* Notify protocols, that a new device appeared. */
5967 call_netdevice_notifiers(NETDEV_REGISTER, dev);
5970 * Prevent userspace races by waiting until the network
	 * device is fully set up before sending notifications.
5973 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5980 EXPORT_SYMBOL_GPL(dev_change_net_namespace);
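/*
 * Example (illustrative sketch, not part of this file): moving a
 * device into another namespace, falling back to a dev%d-style name
 * if its current name is taken there. "net" is assumed to be a held
 * struct net reference.
 *
 *	rtnl_lock();
 *	err = dev_change_net_namespace(dev, net, "dev%d");
 *	rtnl_unlock();
 */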
5982 static int dev_cpu_callback(struct notifier_block *nfb,
5983 unsigned long action,
5986 struct sk_buff **list_skb;
5987 struct sk_buff *skb;
5988 unsigned int cpu, oldcpu = (unsigned long)ocpu;
5989 struct softnet_data *sd, *oldsd;
5991 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
5994 local_irq_disable();
5995 cpu = smp_processor_id();
5996 sd = &per_cpu(softnet_data, cpu);
5997 oldsd = &per_cpu(softnet_data, oldcpu);
5999 /* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
6003 /* Append completion queue from offline CPU. */
6004 *list_skb = oldsd->completion_queue;
6005 oldsd->completion_queue = NULL;
6007 /* Append output queue from offline CPU. */
6008 if (oldsd->output_queue) {
6009 *sd->output_queue_tailp = oldsd->output_queue;
6010 sd->output_queue_tailp = oldsd->output_queue_tailp;
6011 oldsd->output_queue = NULL;
6012 oldsd->output_queue_tailp = &oldsd->output_queue;
6014 /* Append NAPI poll list from offline CPU. */
6015 if (!list_empty(&oldsd->poll_list)) {
6016 list_splice_init(&oldsd->poll_list, &sd->poll_list);
6017 raise_softirq_irqoff(NET_RX_SOFTIRQ);
6020 raise_softirq_irqoff(NET_TX_SOFTIRQ);
6023 /* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}
6038 * netdev_increment_features - increment feature set by one
6039 * @all: current feature set
6040 * @one: new feature set
6041 * @mask: mask feature set
6043 * Computes a new feature set after adding a device with feature set
6044 * @one to the master device with current feature set @all. Will not
6045 * enable anything that is off in @mask. Returns the new feature set.
6047 netdev_features_t netdev_increment_features(netdev_features_t all,
6048 netdev_features_t one, netdev_features_t mask)
6050 if (mask & NETIF_F_GEN_CSUM)
6051 mask |= NETIF_F_ALL_CSUM;
6052 mask |= NETIF_F_VLAN_CHALLENGED;
6054 all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
6055 all &= one | ~NETIF_F_ALL_FOR_ALL;
6057 /* If one device supports hw checksumming, set for all. */
6058 if (all & NETIF_F_GEN_CSUM)
6059 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
6063 EXPORT_SYMBOL(netdev_increment_features);
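/*
 * Example (illustrative sketch, not part of this file): how a master
 * driver in the style of bonding could fold its slaves' feature sets
 * into its own; the example_ list and mask names are hypothetical.
 *
 *	netdev_features_t features = example_base_features;
 *	struct net_device *slave;
 *
 *	list_for_each_entry(slave, &example_slaves, example_node)
 *		features = netdev_increment_features(features,
 *						     slave->features,
 *						     example_feature_mask);
 *	dev->features = features;
 */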
6065 static struct hlist_head *netdev_create_hash(void)
6068 struct hlist_head *hash;
	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
6078 /* Initialize per network namespace state */
6079 static int __net_init netdev_init(struct net *net)
6081 if (net != &init_net)
6082 INIT_LIST_HEAD(&net->dev_base_head);
6084 net->dev_name_head = netdev_create_hash();
6085 if (net->dev_name_head == NULL)
6088 net->dev_index_head = netdev_create_hash();
6089 if (net->dev_index_head == NULL)
6095 kfree(net->dev_name_head);
6101 * netdev_drivername - network driver for the device
6102 * @dev: network device
6104 * Determine network driver for device.
6106 const char *netdev_drivername(const struct net_device *dev)
6108 const struct device_driver *driver;
6109 const struct device *parent;
6110 const char *empty = "";
6112 parent = dev->dev.parent;
6116 driver = parent->driver;
6117 if (driver && driver->name)
6118 return driver->name;
6122 static int __netdev_printk(const char *level, const struct net_device *dev,
6123 struct va_format *vaf)
6127 if (dev && dev->dev.parent) {
6128 r = dev_printk_emit(level[1] - '0',
6131 dev_driver_string(dev->dev.parent),
6132 dev_name(dev->dev.parent),
6133 netdev_name(dev), vaf);
6135 r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
6137 r = printk("%s(NULL net_device): %pV", level, vaf);
6143 int netdev_printk(const char *level, const struct net_device *dev,
6144 const char *format, ...)
6146 struct va_format vaf;
6150 va_start(args, format);
6155 r = __netdev_printk(level, dev, &vaf);
6161 EXPORT_SYMBOL(netdev_printk);
6163 #define define_netdev_printk_level(func, level) \
6164 int func(const struct net_device *dev, const char *fmt, ...) \
6167 struct va_format vaf; \
6170 va_start(args, fmt); \
6175 r = __netdev_printk(level, dev, &vaf); \
6181 EXPORT_SYMBOL(func);
6183 define_netdev_printk_level(netdev_emerg, KERN_EMERG);
6184 define_netdev_printk_level(netdev_alert, KERN_ALERT);
6185 define_netdev_printk_level(netdev_crit, KERN_CRIT);
6186 define_netdev_printk_level(netdev_err, KERN_ERR);
6187 define_netdev_printk_level(netdev_warn, KERN_WARNING);
6188 define_netdev_printk_level(netdev_notice, KERN_NOTICE);
6189 define_netdev_printk_level(netdev_info, KERN_INFO);
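/*
 * Example (illustrative sketch, not part of this file): these helpers
 * prefix the message with the driver and device names, so callers
 * pass the netdev rather than formatting it themselves ("ring" and
 * "speed" below are hypothetical variables):
 *
 *	netdev_err(dev, "TX ring %d stalled\n", ring);
 *	netdev_info(dev, "link up, %u Mbps\n", speed);
 */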
6191 static void __net_exit netdev_exit(struct net *net)
6193 kfree(net->dev_name_head);
6194 kfree(net->dev_index_head);
6197 static struct pernet_operations __net_initdata netdev_net_ops = {
6198 .init = netdev_init,
6199 .exit = netdev_exit,
6202 static void __net_exit default_device_exit(struct net *net)
6204 struct net_device *dev, *aux;
6206 * Push all migratable network devices back to the
6207 * initial network namespace
6210 for_each_netdev_safe(net, dev, aux) {
6212 char fb_name[IFNAMSIZ];
		/* Ignore unmovable devices (e.g. loopback) */
6215 if (dev->features & NETIF_F_NETNS_LOCAL)
6218 /* Leave virtual devices for the generic cleanup */
6219 if (dev->rtnl_link_ops)
6222 /* Push remaining network devices to init_net */
6223 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
6224 err = dev_change_net_namespace(dev, &init_net, fb_name);
6226 pr_emerg("%s: failed to move %s to init_net: %d\n",
6227 __func__, dev->name, err);
6234 static void __net_exit default_device_exit_batch(struct list_head *net_list)
	/* At exit all network devices must be removed from a network
6237 * namespace. Do this in the reverse order of registration.
6238 * Do this across as many network namespaces as possible to
6239 * improve batching efficiency.
6241 struct net_device *dev;
6243 LIST_HEAD(dev_kill_list);
6246 list_for_each_entry(net, net_list, exit_list) {
6247 for_each_netdev_reverse(net, dev) {
6248 if (dev->rtnl_link_ops)
6249 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
6251 unregister_netdevice_queue(dev, &dev_kill_list);
6254 unregister_netdevice_many(&dev_kill_list);
6255 list_del(&dev_kill_list);
6259 static struct pernet_operations __net_initdata default_device_ops = {
6260 .exit = default_device_exit,
6261 .exit_batch = default_device_exit_batch,
6265 * Initialize the DEV module. At boot time this walks the device list and
6266 * unhooks any devices that fail to initialise (normally hardware not
6267 * present) and leaves us with a valid list of present and active devices.
6272 * This is called single threaded during boot, so no need
6273 * to take the rtnl semaphore.
6275 static int __init net_dev_init(void)
6277 int i, rc = -ENOMEM;
6279 BUG_ON(!dev_boot_phase);
6281 if (dev_proc_init())
6284 if (netdev_kobject_init())
6287 INIT_LIST_HEAD(&ptype_all);
6288 for (i = 0; i < PTYPE_HASH_SIZE; i++)
6289 INIT_LIST_HEAD(&ptype_base[i]);
6291 INIT_LIST_HEAD(&offload_base);
6293 if (register_pernet_subsys(&netdev_net_ops))
6297 * Initialise the packet receive queues.
6300 for_each_possible_cpu(i) {
6301 struct softnet_data *sd = &per_cpu(softnet_data, i);
6303 memset(sd, 0, sizeof(*sd));
6304 skb_queue_head_init(&sd->input_pkt_queue);
6305 skb_queue_head_init(&sd->process_queue);
6306 sd->completion_queue = NULL;
6307 INIT_LIST_HEAD(&sd->poll_list);
6308 sd->output_queue = NULL;
6309 sd->output_queue_tailp = &sd->output_queue;
6311 sd->csd.func = rps_trigger_softirq;
6317 sd->backlog.poll = process_backlog;
6318 sd->backlog.weight = weight_p;
6319 sd->backlog.gro_list = NULL;
6320 sd->backlog.gro_count = 0;
	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free
	 * the loopback device, ensure this invariant is maintained by
	 * keeping the loopback device the first device on the list of
	 * network devices: the first device that appears and the last
	 * network device that disappears.
	 */
6334 if (register_pernet_device(&loopback_net_ops))
6337 if (register_pernet_device(&default_device_ops))
6340 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
6341 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
6343 hotcpu_notifier(dev_cpu_callback, 0);
6350 subsys_initcall(net_dev_init);